^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) /* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #define pr_fmt(fmt) "sfc_nand: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "rkflash_debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "rk_sftl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "sfc_nand.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) static u32 sfc_nand_get_ecc_status0(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) static u32 sfc_nand_get_ecc_status1(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) static u32 sfc_nand_get_ecc_status2(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) static u32 sfc_nand_get_ecc_status3(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) static u32 sfc_nand_get_ecc_status4(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static u32 sfc_nand_get_ecc_status5(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) static u32 sfc_nand_get_ecc_status6(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) static u32 sfc_nand_get_ecc_status7(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) static u32 sfc_nand_get_ecc_status8(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
/*
 * Supported SPI-NAND parameter table.
 *
 * Matched by sfc_nand_get_info(): an entry applies when its first two ID
 * bytes (id0, id1) equal the probed device ID, and — when the entry's third
 * ID byte is non-zero — that byte must match as well.
 *
 * NOTE(review): the column meanings follow struct nand_info (declared in
 * sfc_nand.h, not visible in this chunk); the trailing member is the
 * per-chip ECC-status decode callback. Confirm field semantics against the
 * header before editing entries.
 */
static struct nand_info spi_nand_tbl[] = {
	/* TC58CVG0S0HxAIx */
	{ 0x98, 0xC2, 0x00, 4, 0x40, 1, 1024, 0x00, 18, 0x8, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* TC58CVG1S0HxAIx */
	{ 0x98, 0xCB, 0x00, 4, 0x40, 2, 1024, 0x00, 19, 0x8, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* TC58CVG2S0HRAIJ */
	{ 0x98, 0xED, 0x00, 8, 0x40, 1, 2048, 0x0C, 20, 0x8, 0, { 0x04, 0x0C, 0x08, 0x10 }, &sfc_nand_get_ecc_status0 },
	/* TC58CVG1S3HRAIJ */
	{ 0x98, 0xEB, 0x00, 4, 0x40, 1, 2048, 0x0C, 19, 0x8, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* TC58CVG0S3HRAIJ */
	{ 0x98, 0xE2, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x8, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },

	/* MX35LF1GE4AB */
	{ 0xC2, 0x12, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* MX35LF2GE4AB */
	{ 0xC2, 0x22, 0x00, 4, 0x40, 2, 1024, 0x0C, 19, 0x4, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* MX35LF2GE4AD */
	{ 0xC2, 0x26, 0x00, 4, 0x40, 1, 2048, 0x0C, 19, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* MX35LF4GE4AD */
	{ 0xC2, 0x37, 0x00, 8, 0x40, 1, 2048, 0x0C, 20, 0x8, 1, { 0x04, 0x08, 0x14, 0x18 }, &sfc_nand_get_ecc_status0 },
	/* MX35UF1GE4AC */
	{ 0xC2, 0x92, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* MX35UF2GE4AC */
	{ 0xC2, 0xA2, 0x00, 4, 0x40, 1, 2048, 0x0C, 19, 0x4, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },

	/* GD5F1GQ4UAYIG */
	{ 0xC8, 0xF1, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* GD5F1GQ4RB9IGR */
	{ 0xC8, 0xD1, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status3 },
	/* GD5F2GQ40BY2GR */
	{ 0xC8, 0xD2, 0x00, 4, 0x40, 2, 1024, 0x0C, 19, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status3 },
	/* GD5F1GQ5UEYIG */
	{ 0xC8, 0x51, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status2 },
	/* GD5F2GQ5UEYIG */
	{ 0xC8, 0x52, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status2 },
	/* GD5F1GQ4R */
	{ 0xC8, 0xC1, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status3 },
	/* GD5F4GQ6RExxG 1*4096 */
	{ 0xC8, 0x45, 0x00, 4, 0x40, 2, 2048, 0x4C, 20, 0x4, 1, { 0x04, 0x08, 0X14, 0x18 }, &sfc_nand_get_ecc_status2 },
	/* GD5F4GQ6UExxG 1*4096 */
	{ 0xC8, 0x55, 0x00, 4, 0x40, 2, 2048, 0x4C, 20, 0x4, 1, { 0x04, 0x08, 0X14, 0x18 }, &sfc_nand_get_ecc_status2 },
	/* GD5F1GQ4UExxH */
	{ 0xC8, 0xD9, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status3 },

	/* W25N01GV */
	{ 0xEF, 0xAA, 0x21, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 0, { 0x04, 0x14, 0x24, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* W25N02KVZEIR */
	{ 0xEF, 0xAA, 0x22, 4, 0x40, 1, 2048, 0x4C, 19, 0x8, 0, { 0x04, 0x14, 0x24, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* W25N04KVZEIR */
	{ 0xEF, 0xAA, 0x23, 4, 0x40, 1, 4096, 0x4C, 20, 0x8, 0, { 0x04, 0x14, 0x24, 0x34 }, &sfc_nand_get_ecc_status0 },
	/* W25N01GW */
	{ 0xEF, 0xBA, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 0, { 0x04, 0x14, 0x24, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* W25N512GVEIG */
	{ 0xEF, 0xAA, 0x20, 4, 0x40, 1, 512, 0x4C, 17, 0x1, 0, { 0x04, 0x14, 0x24, 0xFF }, &sfc_nand_get_ecc_status1 },

	/* HYF2GQ4UAACAE */
	{ 0xC9, 0x52, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0xE, 1, { 0x04, 0x24, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* HYF1GQ4UDACAE */
	{ 0xC9, 0x21, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* HYF1GQ4UPACAE */
	{ 0xC9, 0xA1, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* HYF2GQ4UDACAE */
	{ 0xC9, 0x22, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* HYF2GQ4UHCCAE */
	{ 0xC9, 0x5A, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0xE, 1, { 0x04, 0x24, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* HYF4GQ4UAACBE */
	{ 0xC9, 0xD4, 0x00, 8, 0x40, 1, 2048, 0x4C, 20, 0x4, 1, { 0x20, 0x40, 0x24, 0x44 }, &sfc_nand_get_ecc_status0 },

	/* FS35ND01G-S1 */
	{ 0xCD, 0xB1, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x10, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status5 },
	/* FS35ND02G-S2 */
	{ 0xCD, 0xA2, 0x00, 4, 0x40, 1, 2048, 0x00, 19, 0x4, 0, { 0x10, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status5 },
	/* FS35ND01G-S1Y2 */
	{ 0xCD, 0xEA, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x4, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* FS35ND02G-S3Y2 */
	{ 0xCD, 0xEB, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x4, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* FS35ND04G-S2Y2 1*4096 */
	{ 0xCD, 0xEC, 0x00, 4, 0x40, 2, 2048, 0x4C, 20, 0x4, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* F35SQA001G */
	{ 0xCD, 0x71, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },

	/* DS35Q1GA-IB */
	{ 0xE5, 0x71, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* DS35Q2GA-IB */
	{ 0xE5, 0x72, 0x00, 4, 0x40, 2, 1024, 0x0C, 19, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* DS35M1GA-1B */
	{ 0xE5, 0x21, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* DS35Q2GB-IB */
	{ 0xE5, 0xF2, 0x00, 4, 0x40, 2, 1024, 0x0C, 19, 0x8, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status6 },

	/* EM73C044VCC-H */
	{ 0xD5, 0x22, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x8, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* EM73D044VCE-H */
	{ 0xD5, 0x20, 0x00, 4, 0x40, 1, 2048, 0x0C, 19, 0x8, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* EM73E044SNA-G */
	{ 0xD5, 0x03, 0x00, 8, 0x40, 1, 2048, 0x4C, 20, 0x8, 1, { 0x04, 0x28, 0x08, 0x2C }, &sfc_nand_get_ecc_status0 },
	/* EM73C044VCF-H */
	{ 0xD5, 0x25, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x04, 0x14, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },

	/* XT26G02A */
	{ 0x0B, 0xE2, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x8, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status4 },
	/* XT26G01A */
	{ 0x0B, 0xE1, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x8, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status4 },
	/* XT26G04A */
	{ 0x0B, 0xE3, 0x00, 4, 0x80, 1, 2048, 0x4C, 20, 0x8, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status4 },
	/* XT26G01B */
	{ 0x0B, 0xF1, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x8, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status4 },
	/* XT26G02B */
	{ 0x0B, 0xF2, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x4, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status5 },
	/* XT26G01C */
	{ 0x0B, 0x11, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x8, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status7 },
	/* XT26G02C */
	{ 0x0B, 0x12, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x8, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status7 },
	/* XT26G04C */
	{ 0x0B, 0x13, 0x00, 8, 0x40, 1, 2048, 0x4C, 20, 0x8, 1, { 0x04, 0x08, 0x0C, 0x10 }, &sfc_nand_get_ecc_status7 },
	/* XT26G11C */
	{ 0x0B, 0x15, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x8, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },

	/* MT29F2G01ABA, XT26G02E, F50L2G41XA */
	{ 0x2C, 0x24, 0x00, 4, 0x40, 2, 1024, 0x4C, 19, 0x8, 0, { 0x20, 0x24, 0xFF, 0xFF }, &sfc_nand_get_ecc_status6 },
	/* MT29F1G01ABA, F50L1G41XA */
	{ 0x2C, 0x14, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x8, 0, { 0x20, 0x24, 0xFF, 0xFF }, &sfc_nand_get_ecc_status6 },

	/* FM25S01 */
	{ 0xA1, 0xA1, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 0, { 0x00, 0x04, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* FM25S01A */
	{ 0xA1, 0xE4, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 0, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* FM25S02A */
	{ 0xA1, 0xE5, 0x00, 4, 0x40, 2, 1024, 0x4C, 19, 0x1, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },

	/* IS37SML01G1 */
	{ 0xC8, 0x21, 0x00, 4, 0x40, 1, 1024, 0x00, 18, 0x1, 0, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* F50L1G41LB */
	{ 0xC8, 0x01, 0x00, 4, 0x40, 1, 1024, 0x4C, 18, 0x1, 0, { 0x14, 0x24, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* ATO25D1GA */
	{ 0x9B, 0x12, 0x00, 4, 0x40, 1, 1024, 0x40, 18, 0x1, 1, { 0x14, 0x24, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
	/* BWJX08K-2Gb */
	{ 0xBC, 0xB3, 0x00, 4, 0x40, 1, 2048, 0x4C, 19, 0x8, 1, { 0x04, 0x10, 0xFF, 0xFF }, &sfc_nand_get_ecc_status0 },
	/* JS28U1GQSCAHG-83 */
	{ 0xBF, 0x21, 0x00, 4, 0x40, 1, 1024, 0x40, 18, 0x4, 1, { 0x08, 0x0C, 0xFF, 0xFF }, &sfc_nand_get_ecc_status8 },
	/* SGM7000I-S24W1GH */
	{ 0xEA, 0xC1, 0x00, 4, 0x40, 1, 1024, 0x0C, 18, 0x4, 1, { 0x04, 0x08, 0xFF, 0xFF }, &sfc_nand_get_ecc_status1 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
static struct nand_info *p_nand_info;	/* matched spi_nand_tbl entry; NULL until a device is identified */
static u32 *gp_page_buf;		/* presumably a page bounce buffer — not referenced in this chunk, TODO confirm */
static struct SFNAND_DEV sfc_nand_dev;	/* driver device state — fields not referenced in this chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static struct nand_info *sfc_nand_get_info(u8 *nand_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) for (i = 0; i < ARRAY_SIZE(spi_nand_tbl); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) if (spi_nand_tbl[i].id0 == nand_id[0] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) spi_nand_tbl[i].id1 == nand_id[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (spi_nand_tbl[i].id2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) spi_nand_tbl[i].id2 != nand_id[2])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return &spi_nand_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) static int sfc_nand_write_en(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) op.sfcmd.b.cmd = CMD_WRITE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) ret = sfc_request(&op, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) static int sfc_nand_rw_preset(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) op.sfcmd.b.cmd = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) op.sfcmd.b.cs = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) op.sfctrl.b.datalines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) op.sfctrl.b.cmdlines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) op.sfctrl.b.addrlines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) ret = sfc_request(&op, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) static int sfc_nand_read_feature(u8 addr, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) op.sfcmd.b.cmd = 0x0F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) op.sfctrl.b.addrbits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) *data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) ret = sfc_request(&op, addr, data, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) return SFC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) static int sfc_nand_write_feature(u32 addr, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) sfc_nand_write_en();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) op.sfcmd.b.cmd = 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) op.sfcmd.b.rw = SFC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) op.sfctrl.b.addrbits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) ret = sfc_request(&op, addr, &status, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static int sfc_nand_wait_busy(u8 *data, int timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) *data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) *data = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return SFC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) return SFC_NAND_WAIT_TIME_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * ecc default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * ecc bits: 0xC0[4,5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * 0b00, No bit errors were detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * 0b01, Bit errors were detected and corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * 0b10, Multiple bit errors were detected and not corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * 0b11, Bits errors were detected and corrected, bit error count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * reach the bit flip detection threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static u32 sfc_nand_get_ecc_status0(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) ecc = (status >> 4) & 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (ecc <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) else if (ecc == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * ecc spectial type1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * ecc bits: 0xC0[4,5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * 0b00, No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) * 0b01, Bits errors were detected and corrected, bit error count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * may reach the bit flip detection threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * 0b10, Multiple bit errors were detected and not corrected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * 0b11, Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) static u32 sfc_nand_get_ecc_status1(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) ecc = (status >> 4) & 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (ecc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) else if (ecc == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * ecc spectial type2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * ecc bits: 0xC0[4,5] 0xF0[4,5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * [0b0000, 0b0011], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * [0b0100, 0b0111], Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * [0b1000, 0b1011], Multiple bit errors were detected and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * not corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * [0b1100, 0b1111], reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) static u32 sfc_nand_get_ecc_status2(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) u8 status, status1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) ret = sfc_nand_read_feature(0xF0, &status1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) ecc = (status >> 4) & 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) ecc = (ecc << 2) | ((status1 >> 4) & 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (ecc < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) else if (ecc == 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * ecc spectial type3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * ecc bits: 0xC0[4,5] 0xF0[4,5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * [0b0000, 0b0011], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * [0b0100, 0b0111], Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * [0b1000, 0b1011], Multiple bit errors were detected and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * not corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * [0b1100, 0b1111], Bit error count equals the bit flip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * detectio nthreshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static u32 sfc_nand_get_ecc_status3(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) u8 status, status1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) ret = sfc_nand_read_feature(0xF0, &status1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) ecc = (status >> 4) & 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) ecc = (ecc << 2) | ((status1 >> 4) & 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (ecc < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) else if (ecc == 7 || ecc >= 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * ecc spectial type4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * ecc bits: 0xC0[2,5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * [0b0000], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * [0b0001, 0b0111], Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * [0b1000], Multiple bit errors were detected and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * not corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * [0b1100], Bit error count equals the bit flip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * detection threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * else, reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) static u32 sfc_nand_get_ecc_status4(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ecc = (status >> 2) & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (ecc < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) else if (ecc == 7 || ecc == 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * ecc spectial type5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * ecc bits: 0xC0[4,6]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * [0b000], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * [0b001, 0b011], Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * [0b100], Bit error count equals the bit flip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * detection threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * [0b101, 0b110], Reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * [0b111], Multiple bit errors were detected and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * not corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) static u32 sfc_nand_get_ecc_status5(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) ecc = (status >> 4) & 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) if (ecc < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) else if (ecc == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * ecc spectial type6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) * ecc bits: 0xC0[4,6]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * [0b000], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * [0b001], 1-3 Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * [0b010], Multiple bit errors were detected and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * not corrected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * [0b011], 4-6 Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * [0b101], Bit error count equals the bit flip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * detectionthreshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * others, Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) static u32 sfc_nand_get_ecc_status6(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) ecc = (status >> 4) & 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (ecc == 0 || ecc == 1 || ecc == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) else if (ecc == 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) * ecc spectial type7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * ecc bits: 0xC0[4,7]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * [0b0000], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * [0b0001, 0b0111], 1-7 Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * [0b1000], 8 Bit errors were detected and corrected. Bit error count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * equals the bit flip detectionthreshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * [0b1111], Bit errors greater than ECC capability(8 bits) and not corrected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * others, Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static u32 sfc_nand_get_ecc_status7(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) ecc = (status >> 4) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (ecc < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) else if (ecc == 7 || ecc == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * ecc spectial type8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * ecc bits: 0xC0[4,6]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * [0b000], No bit errors were detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * [0b001, 0b011], 1~3 Bit errors were detected and corrected. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * reach Flipping Bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * [0b100], Bit error count equals the bit flip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * detection threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * others, Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) static u32 sfc_nand_get_ecc_status8(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) u8 ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) u32 timeout = 1000 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) for (i = 0; i < timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) ret = sfc_nand_read_feature(0xC0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) return SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!(status & (1 << 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) sfc_delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) ecc = (status >> 4) & 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (ecc < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) ret = SFC_NAND_ECC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) else if (ecc == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) ret = SFC_NAND_ECC_REFRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ret = (u32)SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) u32 sfc_nand_erase_block(u8 cs, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) rkflash_print_dio("%s %x\n", __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) op.sfcmd.b.cmd = 0xd8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) op.sfcmd.b.addrbits = SFC_ADDR_24BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) op.sfcmd.b.rw = SFC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) sfc_nand_write_en();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ret = sfc_request(&op, addr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) ret = sfc_nand_wait_busy(&status, 1000 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (status & (1 << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return SFC_NAND_PROG_ERASE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) static u32 sfc_nand_read_cache(u32 row, u32 *p_page_buf, u32 column, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) u32 plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) op.sfcmd.b.cmd = sfc_nand_dev.page_read_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) op.sfcmd.b.dummybits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) op.sfctrl.b.datalines = sfc_nand_dev.read_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) op.sfctrl.b.addrbits = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) plane = p_nand_info->plane_per_die == 2 ? ((row >> 6) & 0x1) << 12 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = sfc_request(&op, plane | column, p_page_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return SFC_NAND_HW_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) u32 sfc_nand_prog_page_raw(u8 cs, u32 addr, u32 *p_page_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) u32 plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) u32 page_size = SFC_NAND_SECTOR_FULL_SIZE * p_nand_info->sec_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) u32 data_area_size = SFC_NAND_SECTOR_SIZE * p_nand_info->sec_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) rkflash_print_dio("%s %x %x\n", __func__, addr, p_page_buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) sfc_nand_write_en();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (sfc_nand_dev.prog_lines == DATA_LINES_X4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) p_nand_info->feature & FEA_SOFT_QOP_BIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) sfc_get_version() < SFC_VER_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) sfc_nand_rw_preset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) op.sfcmd.b.cmd = sfc_nand_dev.page_prog_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) op.sfcmd.b.rw = SFC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) op.sfctrl.b.datalines = sfc_nand_dev.prog_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) op.sfctrl.b.addrbits = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) plane = p_nand_info->plane_per_die == 2 ? ((addr >> 6) & 0x1) << 12 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) sfc_request(&op, plane, p_page_buf, page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * At the moment of power lost or dev running in harsh environment, flash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * maybe work in a unkonw state and result in bit flip, when this situation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * is detected by cache recheck, it's better to wait a second for a reliable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * hardware environment to avoid abnormal data written to flash array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (p_nand_info->id0 == MID_GIGADEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) sfc_nand_read_cache(addr, (u32 *)sfc_nand_dev.recheck_buffer, 0, data_area_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (memcmp(sfc_nand_dev.recheck_buffer, p_page_buf, data_area_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) rkflash_print_error("%s cache bitflip1\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) sfc_request(&op, plane, p_page_buf, page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) op.sfcmd.b.cmd = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) op.sfcmd.b.addrbits = SFC_ADDR_24BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) op.sfcmd.b.rw = SFC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ret = sfc_request(&op, addr, p_page_buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ret = sfc_nand_wait_busy(&status, 1000 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (status & (1 << 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return SFC_NAND_PROG_ERASE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) u32 sfc_nand_prog_page(u8 cs, u32 addr, u32 *p_data, u32 *p_spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 sec_per_page = p_nand_info->sec_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u32 data_size = sec_per_page * SFC_NAND_SECTOR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct nand_mega_area *meta = &p_nand_info->meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) memcpy(gp_page_buf, p_data, data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) memset(&gp_page_buf[data_size / 4], 0xff, sec_per_page * 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) gp_page_buf[(data_size + meta->off0) / 4] = p_spare[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) gp_page_buf[(data_size + meta->off1) / 4] = p_spare[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (sec_per_page == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) gp_page_buf[(data_size + meta->off2) / 4] = p_spare[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) gp_page_buf[(data_size + meta->off3) / 4] = p_spare[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ret = sfc_nand_prog_page_raw(cs, addr, gp_page_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u32 sfc_nand_read(u32 row, u32 *p_page_buf, u32 column, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) u32 plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) u32 ecc_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) op.sfcmd.b.cmd = 0x13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) op.sfcmd.b.rw = SFC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) op.sfcmd.b.addrbits = SFC_ADDR_24BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) sfc_request(&op, row, p_page_buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (sfc_nand_dev.read_lines == DATA_LINES_X4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) p_nand_info->feature & FEA_SOFT_QOP_BIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) sfc_get_version() < SFC_VER_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) sfc_nand_rw_preset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) sfc_nand_wait_busy(&status, 1000 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ecc_result = p_nand_info->ecc_status();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) op.sfcmd.b.cmd = sfc_nand_dev.page_read_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) op.sfcmd.b.dummybits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) op.sfctrl.b.datalines = sfc_nand_dev.read_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) op.sfctrl.b.addrbits = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) plane = p_nand_info->plane_per_die == 2 ? ((row >> 6) & 0x1) << 12 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = sfc_request(&op, plane | column, p_page_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) rkflash_print_dio("%s %x %x\n", __func__, row, p_page_buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return SFC_NAND_HW_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return ecc_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u32 sfc_nand_read_page_raw(u8 cs, u32 addr, u32 *p_page_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) u32 page_size = SFC_NAND_SECTOR_FULL_SIZE * p_nand_info->sec_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return sfc_nand_read(addr, p_page_buf, 0, page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u32 sfc_nand_read_page(u8 cs, u32 addr, u32 *p_data, u32 *p_spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) u32 sec_per_page = p_nand_info->sec_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) u32 data_size = sec_per_page * SFC_NAND_SECTOR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct nand_mega_area *meta = &p_nand_info->meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) memcpy(p_data, gp_page_buf, data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) p_spare[0] = gp_page_buf[(data_size + meta->off0) / 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) p_spare[1] = gp_page_buf[(data_size + meta->off1) / 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (p_nand_info->sec_per_page == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) p_spare[2] = gp_page_buf[(data_size + meta->off2) / 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) p_spare[3] = gp_page_buf[(data_size + meta->off3) / 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (ret == SFC_NAND_HW_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ret = SFC_NAND_ECC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (ret != SFC_NAND_ECC_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rkflash_print_error("%s[0x%x], ret=0x%x\n", __func__, addr, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (p_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rkflash_print_hex("data:", p_data, 4, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (p_spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) rkflash_print_hex("spare:", p_spare, 4, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (ret == SFC_NAND_ECC_ERROR && retries < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) u32 sfc_nand_check_bad_block(u8 cs, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) u32 data_size = p_nand_info->sec_per_page * SFC_NAND_SECTOR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u32 marker = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = sfc_nand_read(addr, &marker, data_size, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* unify with mtd framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (ret == SFC_NAND_ECC_ERROR || (u16)marker != 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) rkflash_print_error("%s page= %x ret= %x spare= %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) __func__, addr, ret, marker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* Original bad block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if ((u16)marker != 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) u32 sfc_nand_mark_bad_block(u8 cs, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) u32 data_size = p_nand_info->sec_per_page * SFC_NAND_SECTOR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return SFC_NAND_HW_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) gp_page_buf[data_size / 4] = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ret = sfc_nand_prog_page_raw(cs, addr, gp_page_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return SFC_NAND_HW_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) int sfc_nand_read_id(u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct rk_sfc_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) op.sfcmd.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) op.sfcmd.b.cmd = CMD_READ_JEDECID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) op.sfctrl.d32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) op.sfctrl.b.addrbits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ret = sfc_request(&op, 0, data, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) #if defined(CONFIG_RK_SFTL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * Read the 1st page's 1st byte of a phy_blk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * If not FF, it's bad blk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int sfc_nand_get_bad_block_list(u16 *table, u32 die)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) u32 bad_cnt, page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u32 blk_per_die;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) u16 blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) rkflash_print_info("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) bad_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) blk_per_die = p_nand_info->plane_per_die *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) p_nand_info->blk_per_plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) for (blk = 0; blk < blk_per_die; blk++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) page = (blk + blk_per_die * die) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) p_nand_info->page_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (sfc_nand_check_bad_block(die, page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) table[bad_cnt++] = blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) rkflash_print_error("die[%d], bad_blk[%d]\n", die, blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return (int)bad_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) void sfc_nand_ftl_ops_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /* para init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) g_nand_phy_info.nand_type = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) g_nand_phy_info.die_num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) g_nand_phy_info.plane_per_die = p_nand_info->plane_per_die;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) g_nand_phy_info.blk_per_plane = p_nand_info->blk_per_plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) g_nand_phy_info.page_per_blk = p_nand_info->page_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) g_nand_phy_info.page_per_slc_blk = p_nand_info->page_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) g_nand_phy_info.byte_per_sec = SFC_NAND_SECTOR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) g_nand_phy_info.sec_per_page = p_nand_info->sec_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) g_nand_phy_info.sec_per_blk = p_nand_info->sec_per_page *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) p_nand_info->page_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) g_nand_phy_info.reserved_blk = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) g_nand_phy_info.blk_per_die = p_nand_info->plane_per_die *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) p_nand_info->blk_per_plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) g_nand_phy_info.ecc_bits = p_nand_info->max_ecc_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* driver register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) g_nand_ops.get_bad_blk_list = sfc_nand_get_bad_block_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) g_nand_ops.erase_blk = sfc_nand_erase_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) g_nand_ops.prog_page = sfc_nand_prog_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) g_nand_ops.read_page = sfc_nand_read_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) g_nand_ops.bch_sel = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static int sfc_nand_enable_QE(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int ret = SFC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ret = sfc_nand_read_feature(0xB0, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (ret != SFC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (status & 1) /* is QE bit set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return SFC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) status |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return sfc_nand_write_feature(0xB0, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
/*
 * Probe and initialize the SPI NAND behind the SFC controller.
 *
 * Reads the JEDEC ID, matches it against the supported-chip table,
 * allocates the shared page buffers, unlocks the array and selects
 * the widest supported read/program data-line mode.
 *
 * Returns SFC_OK on success, FTL_NO_FLASH / FTL_UNSUPPORTED_FLASH on
 * probe failure.
 * NOTE(review): the negative -ENOMEM returns below do not match the
 * u32 FTL_* error convention of the other paths — confirm callers
 * handle both.
 */
u32 sfc_nand_init(void)
{
	u8 status, id_byte[8];

	sfc_nand_read_id(id_byte);
	rkflash_print_error("sfc_nand id: %x %x %x\n",
			    id_byte[0], id_byte[1], id_byte[2]);

	/* All-0xFF / all-0x00 on the bus means no device answered */
	if (id_byte[0] == 0xFF || id_byte[0] == 0x00)
		return (u32)FTL_NO_FLASH;

	p_nand_info = sfc_nand_get_info(id_byte);

	if (!p_nand_info) {
		pr_err("The device not support yet!\n");

		return (u32)FTL_UNSUPPORTED_FLASH;
	}

	/* Allocate once; kept across re-init while the pointer is set */
	if (!gp_page_buf)
		gp_page_buf = (u32 *)__get_free_pages(GFP_KERNEL | GFP_DMA32, get_order(SFC_NAND_PAGE_MAX_SIZE));
	if (!gp_page_buf)
		return -ENOMEM;

	sfc_nand_dev.manufacturer = id_byte[0];
	sfc_nand_dev.mem_type = id_byte[1];
	sfc_nand_dev.capacity = p_nand_info->density;
	sfc_nand_dev.block_size = p_nand_info->page_per_blk * p_nand_info->sec_per_page;
	sfc_nand_dev.page_size = p_nand_info->sec_per_page;

	/* disable block lock */
	sfc_nand_write_feature(0xA0, 0);
	/* Conservative defaults: x1 lines, classic 0x03/0x02 commands */
	sfc_nand_dev.read_lines = DATA_LINES_X1;
	sfc_nand_dev.prog_lines = DATA_LINES_X1;
	sfc_nand_dev.page_read_cmd = 0x03;
	sfc_nand_dev.page_prog_cmd = 0x02;
	if (!sfc_nand_dev.recheck_buffer)
		sfc_nand_dev.recheck_buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32, get_order(SFC_NAND_PAGE_MAX_SIZE));
	if (!sfc_nand_dev.recheck_buffer) {
		pr_err("%s recheck_buffer alloc failed\n", __func__);
		return -ENOMEM;
	}

	/* Upgrade to x4 read (0x6b) when the chip supports it; chips
	 * without a QE bit are quad-capable without any enable step.
	 */
	if (p_nand_info->feature & FEA_4BIT_READ) {
		if ((p_nand_info->has_qe_bits && sfc_nand_enable_QE() == SFC_OK) ||
		    !p_nand_info->has_qe_bits) {
			sfc_nand_dev.read_lines = DATA_LINES_X4;
			sfc_nand_dev.page_read_cmd = 0x6b;
		}
	}

	/* x4 program (0x32) only if x4 read was successfully enabled */
	if (p_nand_info->feature & FEA_4BIT_PROG &&
	    sfc_nand_dev.read_lines == DATA_LINES_X4) {
		sfc_nand_dev.prog_lines = DATA_LINES_X4;
		sfc_nand_dev.page_prog_cmd = 0x32;
	}

	sfc_nand_read_feature(0xA0, &status);
	rkflash_print_info("sfc_nand A0 = 0x%x\n", status);
	sfc_nand_read_feature(0xB0, &status);
	rkflash_print_info("sfc_nand B0 = 0x%x\n", status);
	rkflash_print_info("read_lines = %x\n", sfc_nand_dev.read_lines);
	rkflash_print_info("prog_lines = %x\n", sfc_nand_dev.prog_lines);
	rkflash_print_info("page_read_cmd = %x\n", sfc_nand_dev.page_read_cmd);
	rkflash_print_info("page_prog_cmd = %x\n", sfc_nand_dev.page_prog_cmd);

	return SFC_OK;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) void sfc_nand_deinit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* to-do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) free_pages((unsigned long)sfc_nand_dev.recheck_buffer, get_order(SFC_NAND_PAGE_MAX_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) free_pages((unsigned long)gp_page_buf, get_order(SFC_NAND_PAGE_MAX_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
/* Accessor for the driver's private device state (singleton). */
struct SFNAND_DEV *sfc_nand_get_private_dev(void)
{
	return &sfc_nand_dev;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
/*
 * Accessor for the detected chip's info-table entry; NULL until
 * sfc_nand_init() has matched a supported device.
 */
struct nand_info *sfc_nand_get_nand_info(void)
{
	return p_nand_info;
}