// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */

#include <linux/kernel.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "rkflash_blk.h"
#include "rkflash_debug.h"
#include "sfc_nand.h"
#include "sfc_nand_mtd.h"

#ifdef CONFIG_RK_SFC_NAND_MTD

static struct mtd_partition nand_parts[MAX_PART_COUNT];

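/* Map the embedded struct mtd_info back to its containing device struct. */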
static inline struct snand_mtd_dev *mtd_to_priv(struct mtd_info *ptr_mtd)
{
	return container_of(ptr_mtd, struct snand_mtd_dev, mtd);
}

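/*
 * Erase the block containing byte offset @addr. The controller call takes
 * a page index, so the byte offset is converted with writesize_shift.
 */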
int sfc_nand_erase_mtd(struct mtd_info *mtd, u32 addr)
{
	int ret;

	ret = sfc_nand_erase_block(0, addr >> mtd->writesize_shift);
	if (ret) {
		rkflash_print_error("%s fail ret= %d\n", __func__, ret);
		ret = -EIO;
	}

	return ret;
}

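/*
 * Page-aligned write path: each page is staged in the driver's DMA bounce
 * buffer with the OOB area padded to 0xff, then programmed raw. Caller OOB
 * data is not supported (ops->ooblen must be 0).
 */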
static int sfc_nand_write_mtd(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	u8 *data = (u8 *)ops->datbuf;
	size_t remaining = ops->len;
	int ret = 0;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, to, (u32)remaining);
	if ((to + remaining) > mtd->size || to & mtd->writesize_mask ||
	    remaining & mtd->writesize_mask || ops->ooblen) {
		rkflash_print_error("%s input error, %llx %x\n", __func__, to, (u32)remaining);

		return -EINVAL;
	}

	ops->retlen = 0;
	while (remaining) {
		memcpy(p_dev->dma_buf, data, mtd->writesize);
		memset(p_dev->dma_buf + mtd->writesize, 0xff, mtd->oobsize);
		ret = sfc_nand_prog_page_raw(0, to >> mtd->writesize_shift,
					     (u32 *)p_dev->dma_buf);
		if (ret != SFC_OK) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, to, ret);
			ret = -EIO;
			break;
		}

		data += mtd->writesize;
		ops->retlen += mtd->writesize;
		remaining -= mtd->writesize;
		to += mtd->writesize;
	}

	return ret;
}

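/*
 * Read path with per-page ECC accounting: hardware errors abort with -EIO,
 * uncorrectable ECC errors are counted and reported as -EBADMSG once the
 * whole request has been read, and correctable (refresh) results bump the
 * corrected count and the returned max_bitflips.
 */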
static int sfc_nand_read_mtd(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	u8 *data = (u8 *)ops->datbuf;
	size_t remaining = ops->len;
	int ret = 0;
	bool ecc_failed = false;
	size_t page, off, real_size;
	int max_bitflips = 0;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, from, (u32)remaining);
	if ((from + remaining) > mtd->size || ops->ooblen) {
		rkflash_print_error("%s input error, from= %llx len= %x oob= %x\n",
				    __func__, from, (u32)remaining, (u32)ops->ooblen);

		return -EINVAL;
	}

	ops->retlen = 0;
	while (remaining) {
		page = from >> mtd->writesize_shift;
		off = from & mtd->writesize_mask;
		real_size = min_t(u32, remaining, mtd->writesize - off);

		ret = sfc_nand_read(page, (u32 *)data, off, real_size);
		if (ret == SFC_NAND_HW_ERROR) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, from, ret);
			ret = -EIO;
			break;
		} else if (ret == SFC_NAND_ECC_ERROR) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, from, ret);
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else if (ret == SFC_NAND_ECC_REFRESH) {
			rkflash_print_dio("%s addr %llx ret= %d\n",
					  __func__, from, ret);
			mtd->ecc_stats.corrected += 1;
			max_bitflips = 1;
		}

		/* ECC results are reported via ecc_stats; keep reading. */
		ret = 0;
		data += real_size;
		ops->retlen += real_size;
		remaining -= real_size;
		from += real_size;
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

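/*
 * Bad-block query backed by the in-memory BBT when it is initialized;
 * unknown entries are resolved lazily by reading the on-flash marker and
 * caching the result in the table.
 */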
int sfc_nand_isbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	rkflash_print_dio("%s %llx\n", __func__, ofs);
	if (ofs & mtd->writesize_mask) {
		rkflash_print_error("%s %llx input error\n", __func__, ofs);

		return -EINVAL;
	}

	if (snanddev_bbt_is_initialized(p_dev)) {
		unsigned int entry;
		int status;

		entry = snanddev_bbt_pos_to_entry(p_dev, ofs);
		status = snanddev_bbt_get_block_status(p_dev, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if ((int)sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			snanddev_bbt_set_block_status(p_dev, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	ret = (int)sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		pr_err("%s %llx is bad block\n", __func__, ofs);

	return ret;
}

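/*
 * Mark the block at @ofs bad: erase it first so the marker byte can be
 * programmed, write the bad-block marker, then record the block as worn
 * in the BBT (when available) and re-read the marker to confirm.
 */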
static int sfc_nand_markbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	unsigned int entry;

	rkflash_print_error("%s %llx\n", __func__, ofs);
	if (ofs & mtd->erasesize_mask) {
		rkflash_print_error("%s %llx input error\n", __func__, ofs);

		return -EINVAL;
	}

	if (sfc_nand_isbad_mtd(mtd, ofs))
		return 0;

	/* Erase block before marking it bad. */
	ret = sfc_nand_erase_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		rkflash_print_error("%s erase fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);

	/* Mark bad. */
	ret = sfc_nand_mark_bad_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		rkflash_print_error("%s mark fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);

	if (!snanddev_bbt_is_initialized(p_dev))
		goto out;

	entry = snanddev_bbt_pos_to_entry(p_dev, ofs);
	ret = snanddev_bbt_set_block_status(p_dev, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = snanddev_bbt_update(p_dev);
out:
	/* Mark bad recheck */
	if (sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift)) {
		mtd->ecc_stats.badblocks++;
		ret = 0;
	} else {
		rkflash_print_error("%s recheck fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);
		ret = -EIO;
	}

	return ret;
}

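/*
 * MTD erase callback. Blocks recorded as worn or factory-bad in the BBT
 * are skipped rather than erased; everything runs under the device lock.
 */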
static int sfc_erase_mtd(struct mtd_info *mtd, struct erase_info *instr)
{
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	u64 addr, remaining;
	int ret = 0;

	mutex_lock(p_dev->lock);
	addr = instr->addr;
	remaining = instr->len;
	rkflash_print_dio("%s addr= %llx len= %llx\n", __func__, addr, remaining);
	if ((addr + remaining) > mtd->size || addr & mtd->erasesize_mask) {
		ret = -EINVAL;
		goto out;
	}

	while (remaining) {
		ret = snanddev_bbt_get_block_status(p_dev, addr >> mtd->erasesize_shift);
		if (ret == NAND_BBT_BLOCK_WORN ||
		    ret == NAND_BBT_BLOCK_FACTORY_BAD) {
			rkflash_print_error("attempt to erase a bad/reserved block @%llx\n",
					    addr >> mtd->erasesize_shift);
			/* Skip the block; don't leak the BBT status as a return code. */
			ret = 0;
			addr += mtd->erasesize;
			remaining -= mtd->erasesize;
			continue;
		}

		ret = sfc_nand_erase_mtd(mtd, addr);
		if (ret) {
			rkflash_print_error("%s fail addr 0x%llx ret=%d\n",
					    __func__, addr, ret);
			instr->fail_addr = addr;

			ret = -EIO;
			goto out;
		}

		addr += mtd->erasesize;
		remaining -= mtd->erasesize;
	}

out:
	mutex_unlock(p_dev->lock);

	return ret;
}

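/*
 * The remaining MTD callbacks are thin wrappers that serialize access
 * through the device mutex and translate the mtd_oob_ops interface.
 */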
static int sfc_write_mtd(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct mtd_oob_ops ops;

	mutex_lock(p_dev->lock);
	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.datbuf = (u8 *)buf;
	ops.len = len;
	ret = sfc_nand_write_mtd(mtd, to, &ops);
	*retlen = ops.retlen;
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_read_mtd(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct mtd_oob_ops ops;

	mutex_lock(p_dev->lock);
	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.datbuf = buf;
	ops.len = len;
	ret = sfc_nand_read_mtd(mtd, from, &ops);
	*retlen = ops.retlen;
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_isbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	mutex_lock(p_dev->lock);
	ret = sfc_nand_isbad_mtd(mtd, ofs);
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_markbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	mutex_lock(p_dev->lock);
	ret = sfc_nand_markbad_mtd(mtd, ofs);
	mutex_unlock(p_dev->lock);

	return ret;
}

/*
 * If rk_partition is not supported and the partition layout is fixed,
 * populate struct def_nand_part by adding partitions like the following
 * example:
 * {"u-boot", 0x1000 * 512, 0x2000 * 512},
 * Note:
 * 1. New partition format: {name, size, offset}
 * 2. Unit: byte
 * 3. The last partition's 'size' can be set to 0xFFFFFFFF to use all
 *    remaining space.
 */
static struct mtd_partition def_nand_part[] = {};

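/*
 * Build the mtd_info from the probed chip geometry: p_dev->capacity is
 * log2 of the device size in 512-byte sectors (hence the << 9 below),
 * and page/block sizes are given in sectors of SFC_NAND_SECTOR_SIZE
 * (presumably 512 bytes as well). The power-of-two shifts and masks for
 * writesize/erasesize are derived with ffs().
 */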
int sfc_nand_mtd_init(struct SFNAND_DEV *p_dev, struct mutex *lock)
{
	int ret, i, part_num = 0;
	u64 capacity;
	struct snand_mtd_dev *nand = kzalloc(sizeof(*nand), GFP_KERNEL);

	if (!nand) {
		rkflash_print_error("%s %d alloc failed\n", __func__, __LINE__);
		return -ENOMEM;
	}

	nand->snand = p_dev;
	capacity = ((u64)1 << p_dev->capacity) << 9;
	nand->mtd.name = "spi-nand0";
	nand->mtd.type = MTD_NANDFLASH;
	nand->mtd.writesize = p_dev->page_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.flags = MTD_CAP_NANDFLASH;
	nand->mtd.size = capacity;
	nand->mtd._erase = sfc_erase_mtd;
	nand->mtd._read = sfc_read_mtd;
	nand->mtd._write = sfc_write_mtd;
	nand->mtd._block_isbad = sfc_isbad_mtd;
	nand->mtd._block_markbad = sfc_markbad_mtd;
	nand->mtd.oobsize = 16 * p_dev->page_size;
	nand->mtd.erasesize = p_dev->block_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.writebufsize = p_dev->page_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.erasesize_shift = ffs(nand->mtd.erasesize) - 1;
	nand->mtd.erasesize_mask = (1 << nand->mtd.erasesize_shift) - 1;
	nand->mtd.writesize_shift = ffs(nand->mtd.writesize) - 1;
	nand->mtd.writesize_mask = (1 << nand->mtd.writesize_shift) - 1;
	nand->mtd.bitflip_threshold = 1;
	nand->mtd.priv = nand;
	nand->lock = lock;
	nand->dma_buf = kmalloc(SFC_NAND_PAGE_MAX_SIZE, GFP_KERNEL | GFP_DMA);
	if (!nand->dma_buf) {
		rkflash_print_error("%s dma_buf alloc failed\n", __func__);
		ret = -ENOMEM;
		goto error_out;
	}

	nand->bbt.option |= NANDDEV_BBT_USE_FLASH;
	ret = snanddev_bbt_init(nand);
	if (ret) {
		rkflash_print_error("snanddev_bbt_init failed, ret= %d\n", ret);
		goto error_dma;
	}

	part_num = ARRAY_SIZE(def_nand_part);
	for (i = 0; i < part_num; i++) {
		nand_parts[i].name = kstrdup(def_nand_part[i].name, GFP_KERNEL);
		if (def_nand_part[i].size == 0xFFFFFFFF)
			def_nand_part[i].size = capacity -
						def_nand_part[i].offset;
		nand_parts[i].offset = def_nand_part[i].offset;
		nand_parts[i].size = def_nand_part[i].size;
		nand_parts[i].mask_flags = 0;
	}

	ret = mtd_device_register(&nand->mtd, nand_parts, part_num);
	if (ret) {
		pr_err("%s register mtd fail %d\n", __func__, ret);
	} else {
		pr_info("%s register mtd success\n", __func__);

		return 0;
	}

error_dma:
	kfree(nand->dma_buf);
error_out:
	kfree(nand);

	return ret;
}

#endif