// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 */

#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/mtd/nand_bch.h>
#include <linux/platform_data/elm.h>

#include <linux/omap-gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>

#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000

#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)

#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)

#define TF(value) (value ? 1 : 0)

#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)

#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)

#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)

#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)

#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
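
/*
 * The P*() and P*_s() helpers above each extract one even ("e") or odd ("o")
 * parity flag from a NAND_Ecc_* bitmask and shift it to the bit position it
 * occupies in the re-assembled ECC byte; gen_true_ecc() below shows how the
 * P*() variants are combined.
 */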

#define PREFETCH_CONFIG1_CS_SHIFT 24
#define ECC_CONFIG_CS_SHIFT 1
#define CS_MASK 0x7
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE_SHIFT 2
#define ECCSIZE0_SHIFT 12
#define ECCSIZE1_SHIFT 22
#define ECC1RESULTSIZE 0x1
#define ECCCLEAR 0x100
#define ECC1 0x1
#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define STATUS_BUFF_EMPTY 0x00000001

#define SECTOR_BYTES 512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD 4

/* GPMC ecc engine settings for read */
#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */

/* GPMC ecc engine settings for write */
#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */

#define BADBLOCK_MARKER_LENGTH 2

static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
                                0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
                                0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
                                0x07, 0x0e};
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
                               0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};

struct omap_nand_info {
        struct nand_chip nand;
        struct platform_device *pdev;

        int gpmc_cs;
        bool dev_ready;
        enum nand_io xfer_type;
        int devsize;
        enum omap_ecc ecc_opt;
        struct device_node *elm_of_node;

        unsigned long phys_base;
        struct completion comp;
        struct dma_chan *dma;
        int gpmc_irq_fifo;
        int gpmc_irq_count;
        enum {
                OMAP_NAND_IO_READ = 0,  /* read */
                OMAP_NAND_IO_WRITE,     /* write */
        } iomode;
        u_char *buf;
        int buf_len;
        /* Interface to GPMC */
        struct gpmc_nand_regs reg;
        struct gpmc_nand_ops *ops;
        bool flash_bbt;
        /* fields specific for BCHx_HW ECC scheme */
        struct device *elm_dev;
        /* NAND ready gpio */
        struct gpio_desc *ready_gpiod;
};

static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
{
        return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}

/**
 * omap_prefetch_enable - configures and starts prefetch transfer
 * @cs: cs (chip select) number
 * @fifo_th: fifo threshold to be used for read/write
 * @dma_mode: dma mode enable (1) or disable (0)
 * @u32_count: number of bytes to be transferred
 * @is_write: prefetch read (0) or post write (1) mode
 * @info: NAND device structure
 */
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
                unsigned int u32_count, int is_write, struct omap_nand_info *info)
{
        u32 val;

        if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
                return -1;

        if (readl(info->reg.gpmc_prefetch_control))
                return -EBUSY;

        /* Set the amount of bytes to be prefetched */
        writel(u32_count, info->reg.gpmc_prefetch_config2);

        /*
         * Set dma/mpu mode, the prefetch read/post write, and enable the
         * engine. Also set which cs has requested it.
         */
        val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
               PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
               (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
        writel(val, info->reg.gpmc_prefetch_config1);

        /* Start the prefetch engine */
        writel(0x1, info->reg.gpmc_prefetch_control);

        return 0;
}

/**
 * omap_prefetch_reset - disables and stops the prefetch engine
 * @cs: cs (chip select) number
 * @info: NAND device structure
 */
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
{
        u32 config1;

        /* check if the same module/cs is trying to reset */
        config1 = readl(info->reg.gpmc_prefetch_config1);
        if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
                return -EINVAL;

        /* Stop the PFPW engine */
        writel(0x0, info->reg.gpmc_prefetch_control);

        /* Reset/disable the PFPW engine */
        writel(0x0, info->reg.gpmc_prefetch_config1);

        return 0;
}
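
/*
 * The two helpers above are always used as a pair around a transfer. A
 * minimal sketch of the pattern used by the transfer routines below (error
 * handling elided):
 *
 *	if (!omap_prefetch_enable(info->gpmc_cs, PREFETCH_FIFOTHRESHOLD_MAX,
 *				  dma_mode, len, is_write, info)) {
 *		// move data while polling gpmc_prefetch_status
 *		omap_prefetch_reset(info->gpmc_cs, info);
 *	} else {
 *		// engine busy: fall back to the CPU copy helpers
 *	}
 */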

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @chip: NAND chip object
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
        struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));

        if (cmd != NAND_CMD_NONE) {
                if (ctrl & NAND_CLE)
                        writeb(cmd, info->reg.gpmc_nand_command);

                else if (ctrl & NAND_ALE)
                        writeb(cmd, info->reg.gpmc_nand_address);

                else /* NAND_NCE */
                        writeb(cmd, info->reg.gpmc_nand_data);
        }
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
        struct nand_chip *nand = mtd_to_nand(mtd);

        ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
        struct omap_nand_info *info = mtd_to_omap(mtd);
        u_char *p = (u_char *)buf;
        bool status;

        while (len--) {
                iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
                /* wait until buffer is available for write */
                do {
                        status = info->ops->nand_writebuffer_empty();
                } while (!status);
        }
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
        struct nand_chip *nand = mtd_to_nand(mtd);

        ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
        struct omap_nand_info *info = mtd_to_omap(mtd);
        u16 *p = (u16 *)buf;
        bool status;

        /* FIXME try bursts of writesw() or DMA ... */
        len >>= 1;

        while (len--) {
                iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
                /* wait until buffer is available for write */
                do {
                        status = info->ops->nand_writebuffer_empty();
                } while (!status);
        }
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @chip: NAND chip object
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct omap_nand_info *info = mtd_to_omap(mtd);
        uint32_t r_count = 0;
        int ret = 0;
        u32 *p = (u32 *)buf;

        /* take care of subpage reads */
        if (len % 4) {
                if (info->nand.options & NAND_BUSWIDTH_16)
                        omap_read_buf16(mtd, buf, len % 4);
                else
                        omap_read_buf8(mtd, buf, len % 4);
                p = (u32 *)(buf + len % 4);
                len -= len % 4;
        }

        /* configure and start prefetch transfer */
        ret = omap_prefetch_enable(info->gpmc_cs,
                        PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
        if (ret) {
                /* PFPW engine is busy, use cpu copy method */
                if (info->nand.options & NAND_BUSWIDTH_16)
                        omap_read_buf16(mtd, (u_char *)p, len);
                else
                        omap_read_buf8(mtd, (u_char *)p, len);
        } else {
                do {
                        r_count = readl(info->reg.gpmc_prefetch_status);
                        r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
                        r_count = r_count >> 2;
                        ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
                        p += r_count;
                        len -= r_count << 2;
                } while (len);
                /* disable and stop the PFPW engine */
                omap_prefetch_reset(info->gpmc_cs, info);
        }
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
                                int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct omap_nand_info *info = mtd_to_omap(mtd);
        uint32_t w_count = 0;
        int i = 0, ret = 0;
        u16 *p = (u16 *)buf;
        unsigned long tim, limit;
        u32 val;

        /* take care of subpage writes */
        if (len % 2 != 0) {
                writeb(*buf, info->nand.legacy.IO_ADDR_W);
                p = (u16 *)(buf + 1);
                len--;
        }

        /* configure and start prefetch transfer */
        ret = omap_prefetch_enable(info->gpmc_cs,
                        PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
        if (ret) {
                /* PFPW engine is busy, use cpu copy method */
                if (info->nand.options & NAND_BUSWIDTH_16)
                        omap_write_buf16(mtd, (u_char *)p, len);
                else
                        omap_write_buf8(mtd, (u_char *)p, len);
        } else {
                while (len) {
                        w_count = readl(info->reg.gpmc_prefetch_status);
                        w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
                        w_count = w_count >> 1;
                        for (i = 0; (i < w_count) && len; i++, len -= 2)
                                iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
                }
                /* wait for data to be flushed out before resetting the prefetch */
                tim = 0;
                limit = (loops_per_jiffy *
                         msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
                do {
                        cpu_relax();
                        val = readl(info->reg.gpmc_prefetch_status);
                        val = PREFETCH_STATUS_COUNT(val);
                } while (val && (tim++ < limit));

                /* disable and stop the PFPW engine */
                omap_prefetch_reset(info->gpmc_cs, info);
        }
}

/*
 * omap_nand_dma_callback: callback on the completion of dma transfer
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_callback(void *data)
{
        complete((struct completion *)data);
}

/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                                         unsigned int len, int is_write)
{
        struct omap_nand_info *info = mtd_to_omap(mtd);
        struct dma_async_tx_descriptor *tx;
        enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
                                                 DMA_FROM_DEVICE;
        struct scatterlist sg;
        unsigned long tim, limit;
        unsigned n;
        int ret;
        u32 val;

        if (!virt_addr_valid(addr))
                goto out_copy;

        sg_init_one(&sg, addr, len);
        n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
        if (n == 0) {
                dev_err(&info->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n", len);
                goto out_copy;
        }

        tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
                        is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
                goto out_copy_unmap;

        tx->callback = omap_nand_dma_callback;
        tx->callback_param = &info->comp;
        dmaengine_submit(tx);

        init_completion(&info->comp);

        /* setup and start DMA using dma_addr */
        dma_async_issue_pending(info->dma);

        /* configure and start prefetch transfer */
        ret = omap_prefetch_enable(info->gpmc_cs,
                        PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
        if (ret)
                /* PFPW engine is busy, use cpu copy method */
                goto out_copy_unmap;

        wait_for_completion(&info->comp);
        tim = 0;
        limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));

        do {
                cpu_relax();
                val = readl(info->reg.gpmc_prefetch_status);
                val = PREFETCH_STATUS_COUNT(val);
        } while (val && (tim++ < limit));

        /* disable and stop the PFPW engine */
        omap_prefetch_reset(info->gpmc_cs, info);

        dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
        return 0;

out_copy_unmap:
        dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
        if (info->nand.options & NAND_BUSWIDTH_16)
                is_write == 0 ? omap_read_buf16(mtd, (u_char *)addr, len)
                        : omap_write_buf16(mtd, (u_char *)addr, len);
        else
                is_write == 0 ? omap_read_buf8(mtd, (u_char *)addr, len)
                        : omap_write_buf8(mtd, (u_char *)addr, len);
        return 0;
}
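
/*
 * Note that omap_nand_dma_transfer() never fails from the caller's point of
 * view: if the buffer cannot be DMA-mapped, the slave descriptor cannot be
 * prepared, or the prefetch engine is already busy, it falls back to the CPU
 * copy helpers above and still returns 0.
 */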

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @chip: NAND chip object
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
                                   int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        if (len <= mtd->oobsize)
                omap_read_buf_pref(chip, buf, len);
        else
                /* start transfer in DMA mode */
                omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
                                    int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        if (len <= mtd->oobsize)
                omap_write_buf_pref(chip, buf, len);
        else
                /* start transfer in DMA mode */
                omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
}

/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
        struct omap_nand_info *info = (struct omap_nand_info *)dev;
        u32 bytes;

        bytes = readl(info->reg.gpmc_prefetch_status);
        bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
        bytes = bytes & 0xFFFC; /* I/O in multiples of 4 bytes */
        if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
                if (this_irq == info->gpmc_irq_count)
                        goto done;

                if (info->buf_len && (info->buf_len < bytes))
                        bytes = info->buf_len;
                else if (!info->buf_len)
                        bytes = 0;
                iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
                              bytes >> 2);
                info->buf = info->buf + bytes;
                info->buf_len -= bytes;

        } else {
                ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
                             bytes >> 2);
                info->buf = info->buf + bytes;

                if (this_irq == info->gpmc_irq_count)
                        goto done;
        }

        return IRQ_HANDLED;

done:
        complete(&info->comp);

        disable_irq_nosync(info->gpmc_irq_fifo);
        disable_irq_nosync(info->gpmc_irq_count);

        return IRQ_HANDLED;
}
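
/*
 * The IRQ-mode transfer helpers below pair with omap_nand_irq() above: the
 * handler moves data on each FIFO interrupt and completes info->comp
 * (disabling both interrupts again) once the count interrupt fires. A
 * minimal sketch of the caller side, as implemented below:
 *
 *	info->iomode = OMAP_NAND_IO_READ;	// or OMAP_NAND_IO_WRITE
 *	info->buf = buf;
 *	info->buf_len = len;
 *	init_completion(&info->comp);
 *	omap_prefetch_enable(info->gpmc_cs, fifo_th, 0x0, len, is_write, info);
 *	enable_irq(info->gpmc_irq_count);
 *	enable_irq(info->gpmc_irq_fifo);
 *	wait_for_completion(&info->comp);
 *	omap_prefetch_reset(info->gpmc_cs, info);
 */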

/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @chip: NAND chip object
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
                                   int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct omap_nand_info *info = mtd_to_omap(mtd);
        int ret = 0;

        if (len <= mtd->oobsize) {
                omap_read_buf_pref(chip, buf, len);
                return;
        }

        info->iomode = OMAP_NAND_IO_READ;
        info->buf = buf;
        init_completion(&info->comp);

        /* configure and start prefetch transfer */
        ret = omap_prefetch_enable(info->gpmc_cs,
                        PREFETCH_FIFOTHRESHOLD_MAX / 2, 0x0, len, 0x0, info);
        if (ret)
                /* PFPW engine is busy, use cpu copy method */
                goto out_copy;

        info->buf_len = len;

        enable_irq(info->gpmc_irq_count);
        enable_irq(info->gpmc_irq_fifo);

        /* waiting for read to complete */
        wait_for_completion(&info->comp);

        /* disable and stop the PFPW engine */
        omap_prefetch_reset(info->gpmc_cs, info);
        return;

out_copy:
        if (info->nand.options & NAND_BUSWIDTH_16)
                omap_read_buf16(mtd, buf, len);
        else
                omap_read_buf8(mtd, buf, len);
}

/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
                                    int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct omap_nand_info *info = mtd_to_omap(mtd);
        int ret = 0;
        unsigned long tim, limit;
        u32 val;

        if (len <= mtd->oobsize) {
                omap_write_buf_pref(chip, buf, len);
                return;
        }

        info->iomode = OMAP_NAND_IO_WRITE;
        info->buf = (u_char *)buf;
        init_completion(&info->comp);

        /* configure and start prefetch transfer : size=24 */
        ret = omap_prefetch_enable(info->gpmc_cs,
                        (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
        if (ret)
                /* PFPW engine is busy, use cpu copy method */
                goto out_copy;

        info->buf_len = len;

        enable_irq(info->gpmc_irq_count);
        enable_irq(info->gpmc_irq_fifo);

        /* waiting for write to complete */
        wait_for_completion(&info->comp);

        /* wait for data to be flushed out before resetting the prefetch */
        tim = 0;
        limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
        do {
                val = readl(info->reg.gpmc_prefetch_status);
                val = PREFETCH_STATUS_COUNT(val);
                cpu_relax();
        } while (val && (tim++ < limit));

        /* disable and stop the PFPW engine */
        omap_prefetch_reset(info->gpmc_cs, info);
        return;

out_copy:
        if (info->nand.options & NAND_BUSWIDTH_16)
                omap_write_buf16(mtd, buf, len);
        else
                omap_write_buf8(mtd, buf, len);
}

/**
 * gen_true_ecc - This function will generate the true ECC value
 * @ecc_buf: buffer to store the ecc code
 *
 * The generated true ECC value can be used when correcting
 * data read from the NAND flash memory core.
 */
static void gen_true_ecc(u8 *ecc_buf)
{
        u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
                  ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

        ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
                       P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
        ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
                       P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
        ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
                       P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
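
/*
 * For reference, the repacking done by gen_true_ecc(), as derived from the
 * NAND_Ecc_* and P*() macros above (bits listed LSB first; every output bit
 * is inverted):
 *
 *	in:  ecc_buf[0] = P1e..P128e, ecc_buf[1] = P1o..P128o,
 *	     ecc_buf[2] = P256e..P2048e (low nibble), P256o..P2048o (high nibble)
 *	out: ecc_buf[0] = P8e, P8o, P16e, P16o, P32e, P32o, P64e, P64o
 *	     ecc_buf[1] = P128e, P128o, P256e, P256o, P512e, P512o, P1024e, P1024o
 *	     ecc_buf[2] = P2048e, P2048o, P1e, P1o, P2e, P2o, P4e, P4o
 */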

/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECCs and indicates if there is an error.
 * If the error can be corrected, it is corrected in the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-EBADMSG is returned.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) u8 *ecc_data2, /* read from register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) u8 *page_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) uint i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) u8 ecc_bit[24];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) u8 ecc_sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) u8 find_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) uint find_byte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int isEccFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) gen_true_ecc(ecc_data1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) gen_true_ecc(ecc_data2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) for (i = 0; i <= 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) *(ecc_data1 + i) = ~(*(ecc_data1 + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) *(ecc_data2 + i) = ~(*(ecc_data2 + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) tmp0_bit[i] = *ecc_data1 % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) *ecc_data1 = *ecc_data1 / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) tmp1_bit[i] = *(ecc_data1 + 1) % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) tmp2_bit[i] = *(ecc_data1 + 2) % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) comp0_bit[i] = *ecc_data2 % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) *ecc_data2 = *ecc_data2 / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) comp1_bit[i] = *(ecc_data2 + 1) % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) comp2_bit[i] = *(ecc_data2 + 2) % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) for (i = 0; i < 24; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ecc_sum += ecc_bit[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
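/*
 * Each parity is stored as an even/odd pair, giving 24 syndrome bits for
 * a 512-byte block. A single-bit data error flips exactly one bit of
 * every pair, hence the syndrome weight of 12 in the correctable case.
 */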
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) switch (ecc_sum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* Not reached because this function is not called if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * ECC values are equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* Uncorrectable error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) pr_debug("ECC UNCORRECTED_ERROR 1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* Uncorrectable error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) pr_debug("ECC UNCORRECTED_ERROR B\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Correctable error */
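/*
 * The odd-indexed syndrome bits encode the error location: nine of them
 * form the byte offset within the 512-byte block, three of them the bit
 * offset within that byte.
 */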
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) find_byte = (ecc_bit[23] << 8) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) (ecc_bit[21] << 7) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) (ecc_bit[19] << 6) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) (ecc_bit[17] << 5) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) (ecc_bit[15] << 4) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) (ecc_bit[13] << 3) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) (ecc_bit[11] << 2) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) (ecc_bit[9] << 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ecc_bit[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pr_debug("Correcting single bit ECC error at offset: %d, bit: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) find_byte, find_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) page_data[find_byte] ^= (1 << find_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (isEccFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (ecc_data2[0] == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ecc_data2[1] == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ecc_data2[2] == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pr_debug("UNCORRECTED_ERROR default\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * omap_correct_data - Compares the ECC read with HW generated ECC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * @dat: page data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * @read_ecc: ecc read from nand flash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * @calc_ecc: ecc read from HW ECC registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * Compares the ECC read from the NAND spare area with the ECC register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * values and, if the ECCs mismatch, calls omap_compare_ecc() for error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * detection and correction. If there are no errors, %0 is returned. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * there were errors and all of the errors were corrected, the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * corrected errors is returned. If uncorrectable errors exist, %-EBADMSG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static int omap_correct_data(struct nand_chip *chip, u_char *dat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u_char *read_ecc, u_char *calc_ecc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int blockCnt = 0, i = 0, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* e.g. NAND_ECC_HW12_2048: a 2048-byte ECC step is handled as four 512-byte blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) info->nand.ecc.size == 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) blockCnt = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) blockCnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) for (i = 0; i < blockCnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (memcmp(read_ecc, calc_ecc, 3) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* keep track of the number of corrected errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) stat += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) read_ecc += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) calc_ecc += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dat += 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * omap_calculate_ecc - Generate non-inverted ECC bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * @dat: The pointer to data on which ecc is computed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * @ecc_code: The ecc_code buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * Using non-inverted ECC can be considered ugly since writing a blank
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * page, i.e. padding, will clear the ECC bytes. This is no problem as long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * as nobody is trying to write data to the seemingly unused page. Reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * an erased page will produce an ECC mismatch between generated and read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * ECC bytes that has to be dealt with separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u_char *ecc_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) val = readl(info->reg.gpmc_ecc_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* read ecc result */
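/*
 * The ECC1 result register packs the even parities P1e..P2048e in bits
 * [11:0] and the odd parities P1o..P2048o in bits [27:16]; repack them
 * into the 3-byte ECC layout used on flash.
 */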
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) val = readl(info->reg.gpmc_ecc1_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) *ecc_code++ = val; /* P128e, ..., P1e */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *ecc_code++ = val >> 16; /* P128o, ..., P1o */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * omap_enable_hwecc - This function enables the hardware ecc functionality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * @mode: Read/Write mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static void omap_enable_hwecc(struct nand_chip *chip, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* clear ecc and enable bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) val = ECCCLEAR | ECC1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) writel(val, info->reg.gpmc_ecc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* program ecc and result sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ECC1RESULTSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) writel(val, info->reg.gpmc_ecc_size_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case NAND_ECC_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case NAND_ECC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case NAND_ECC_READSYN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) writel(ECCCLEAR, info->reg.gpmc_ecc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) dev_info(&info->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) "error: unrecognized Mode[%d]!\n", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) writel(val, info->reg.gpmc_ecc_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * omap_wait - wait until the command is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * @this: NAND Chip structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * This function is called by the MTD layer during program and erase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * operations, so it must wait until the NAND chip is ready again after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * the programming/erase operation has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * Erase can take up to 400ms and program up to 20ms according to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * general NAND and SmartMedia specs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static int omap_wait(struct nand_chip *this)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) unsigned long timeo = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) timeo += msecs_to_jiffies(400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
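/* Issue READ STATUS and poll the ready bit, yielding the CPU between polls */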
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) while (time_before(jiffies, timeo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) status = readb(info->reg.gpmc_nand_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (status & NAND_STATUS_READY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) status = readb(info->reg.gpmc_nand_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * omap_dev_ready - checks the NAND Ready GPIO line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * Returns true if ready and false if busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int omap_dev_ready(struct nand_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return gpiod_get_value(info->ready_gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * @mode: Read/Write mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * When using BCH with SW correction (i.e. no ELM), sector size is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * for both reading and writing with:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * eccsize0 = 0 (no additional protected byte in spare area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) unsigned int bch_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unsigned int dev_width, nsectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) enum omap_ecc ecc_opt = info->ecc_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) u32 val, wr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) unsigned int ecc_size1, ecc_size0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
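/*
 * Note: ecc_size0/ecc_size1 below are programmed into the GPMC ECCSIZE0/
 * ECCSIZE1 fields and are expressed in nibbles (see the eccsize notes in
 * the function comment above).
 */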
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* GPMC configurations for calculating ECC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) switch (ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) bch_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) nsectors = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) wr_mode = BCH_WRAPMODE_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ecc_size0 = BCH_ECC_SIZE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ecc_size1 = BCH_ECC_SIZE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) bch_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) nsectors = chip->ecc.steps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (mode == NAND_ECC_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) wr_mode = BCH_WRAPMODE_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ecc_size0 = BCH4R_ECC_SIZE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ecc_size1 = BCH4R_ECC_SIZE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) wr_mode = BCH_WRAPMODE_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ecc_size0 = BCH_ECC_SIZE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ecc_size1 = BCH_ECC_SIZE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) bch_type = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) nsectors = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) wr_mode = BCH_WRAPMODE_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ecc_size0 = BCH_ECC_SIZE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ecc_size1 = BCH_ECC_SIZE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) bch_type = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) nsectors = chip->ecc.steps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (mode == NAND_ECC_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) wr_mode = BCH_WRAPMODE_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ecc_size0 = BCH8R_ECC_SIZE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ecc_size1 = BCH8R_ECC_SIZE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) wr_mode = BCH_WRAPMODE_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ecc_size0 = BCH_ECC_SIZE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ecc_size1 = BCH_ECC_SIZE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) case OMAP_ECC_BCH16_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) bch_type = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) nsectors = chip->ecc.steps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (mode == NAND_ECC_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) wr_mode = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ecc_size0 = 52; /* ECC nibbles per sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ecc_size1 = 0; /* non-ECC nibbles per sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) wr_mode = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ecc_size0 = 0; /* extra nibbles per sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) ecc_size1 = 52; /* OOB nibbles per sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) writel(ECC1, info->reg.gpmc_ecc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /* Configure ecc size for BCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) writel(val, info->reg.gpmc_ecc_size_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* BCH configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) val = ((1 << 16) | /* enable BCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) (bch_type << 12) | /* BCH4/BCH8/BCH16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) (wr_mode << 8) | /* wrap mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) (dev_width << 7) | /* bus width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) (((nsectors - 1) & 0x7) << 4) | /* number of sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) (info->gpmc_cs << 1) | /* ECC CS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) (0x1)); /* enable ECC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) writel(val, info->reg.gpmc_ecc_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* Clear ecc and enable bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
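/*
 * Constant remainders of an all-0xFF sector for the SW-corrected BCH
 * schemes; they are XORed into the calculated ECC below so that erased
 * pages read back with an ECC of 0x0.
 */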
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 0x97, 0x79, 0xe5, 0x24, 0xb5};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * @mtd: MTD device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * @dat: The pointer to data on which ecc is computed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * @ecc_calc: Buffer to store the calculated ECC bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * @i: The sector number (for a multi-sector page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * Supports calculating BCH4/8/16 ECC vectors for one sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * within a page. The sector number is given in @i.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) const u_char *dat, u_char *ecc_calc, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct omap_nand_info *info = mtd_to_omap(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int eccbytes = info->nand.ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct gpmc_nand_regs *gpmc_regs = &info->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) u8 *ecc_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ecc_code = ecc_calc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) switch (info->ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) *ecc_code++ = (bch_val4 & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) *ecc_code++ = (bch_val3 & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) *ecc_code++ = (bch_val2 & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *ecc_code++ = (bch_val1 & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) *ecc_code++ = ((bch_val2 & 0xF) << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ((bch_val1 >> 28) & 0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) *ecc_code++ = ((bch_val1 & 0xF) << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) case OMAP_ECC_BCH16_CODE_HW:
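/* BCH16 syndrome: 26 ECC bytes spread across result registers 6..0 */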
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) val = readl(gpmc_regs->gpmc_bch_result6[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ecc_code[0] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ecc_code[1] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) val = readl(gpmc_regs->gpmc_bch_result5[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ecc_code[2] = ((val >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ecc_code[3] = ((val >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) ecc_code[4] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ecc_code[5] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) val = readl(gpmc_regs->gpmc_bch_result4[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ecc_code[6] = ((val >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ecc_code[7] = ((val >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ecc_code[8] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ecc_code[9] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) val = readl(gpmc_regs->gpmc_bch_result3[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) ecc_code[10] = ((val >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ecc_code[11] = ((val >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ecc_code[12] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) ecc_code[13] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) val = readl(gpmc_regs->gpmc_bch_result2[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ecc_code[14] = ((val >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) ecc_code[15] = ((val >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ecc_code[16] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ecc_code[17] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) val = readl(gpmc_regs->gpmc_bch_result1[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ecc_code[18] = ((val >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ecc_code[19] = ((val >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) ecc_code[20] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ecc_code[21] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) val = readl(gpmc_regs->gpmc_bch_result0[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ecc_code[22] = ((val >> 24) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ecc_code[23] = ((val >> 16) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ecc_code[24] = ((val >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ecc_code[25] = ((val >> 0) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* ECC scheme specific syndrome customizations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) switch (info->ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* Add constant polynomial to remainder, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * ECC of blank pages results in 0x0 on reading back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) for (j = 0; j < eccbytes; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ecc_calc[j] ^= bch4_polynomial[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* Set 8th ECC byte as 0x0 for ROM compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ecc_calc[eccbytes - 1] = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* Add constant polynomial to remainder, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * ECC of blank pages results in 0x0 on reading back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) for (j = 0; j < eccbytes; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) ecc_calc[j] ^= bch8_polynomial[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* Set 14th ECC byte as 0x0 for ROM compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ecc_calc[eccbytes - 1] = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) case OMAP_ECC_BCH16_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * @dat: The pointer to data on which ecc is computed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * @ecc_calc: Buffer to store the calculated ECC bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * Supports calculating BCH4/8/16 ECC vectors for one sector. This is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * when SW-based correction is required, as the ECC is then needed for one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * sector at a time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) const u_char *dat, u_char *ecc_calc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * @mtd: MTD device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * @dat: The pointer to data on which ecc is computed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * @ecc_calc: Buffer to store the calculated ECC bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * Supports calculating BCH4/8/16 ECC vectors for the entire page in one go.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) const u_char *dat, u_char *ecc_calc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct omap_nand_info *info = mtd_to_omap(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) int eccbytes = info->nand.ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) unsigned long nsectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
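/*
 * The sector count was programmed into ECC_CONFIG bits [6:4] when the
 * ECC engine was enabled, so read it back from the register here.
 */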
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) for (i = 0; i < nsectors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) ecc_calc += eccbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * erased_sector_bitflips - count bit flips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * @data: data sector buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * @oob: oob buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * @info: omap_nand_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * Check whether the number of bit flips in an erased page falls below the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * correctable level. If so, report the page as erased with correctable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * bit flips; otherwise report the page as uncorrectable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int erased_sector_bitflips(u_char *data, u_char *oob,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct omap_nand_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int flip_bits = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
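/*
 * An erased sector should read back as all 0xFF: count the 0-bits in the
 * data and ECC bytes, and give up (return 0) once the ECC strength is
 * exceeded.
 */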
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) for (i = 0; i < info->nand.ecc.size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) flip_bits += hweight8(~data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (flip_bits > info->nand.ecc.strength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) flip_bits += hweight8(~oob[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (flip_bits > info->nand.ecc.strength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * Bit flips fall within the correctable level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * Fill the data and OOB areas with 0xFF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (flip_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) memset(data, 0xFF, info->nand.ecc.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) memset(oob, 0xFF, info->nand.ecc.bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return flip_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * omap_elm_correct_data - corrects page data area in case error reported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * @chip: NAND chip object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * @data: page data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * @read_ecc: ecc read from nand flash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * @calc_ecc: ecc read from HW ECC registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * The calculated ECC vector is reported as zero for error-free pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * For a non-zero ECC vector, first filter out erased pages, and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * process the data via the ELM to detect bit-flips.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) u_char *read_ecc, u_char *calc_ecc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct nand_ecc_ctrl *ecc = &info->nand.ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) int eccsteps = info->nand.ecc.steps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) int i, j, stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int eccflag, actual_eccbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) u_char *ecc_vec = calc_ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) u_char *spare_ecc = read_ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) u_char *erased_ecc_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) u_char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int bitflip_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) bool is_error_reported = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u32 bit_pos, byte_pos, error_max, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) switch (info->ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /* omit 7th ECC byte reserved for ROM code compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) actual_eccbytes = ecc->bytes - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) erased_ecc_vec = bch4_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* omit 14th ECC byte reserved for ROM code compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) actual_eccbytes = ecc->bytes - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) erased_ecc_vec = bch8_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) case OMAP_ECC_BCH16_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) actual_eccbytes = ecc->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) erased_ecc_vec = bch16_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) dev_err(&info->pdev->dev, "invalid driver configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Initialize elm error vector to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) memset(err_vec, 0, sizeof(err_vec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) for (i = 0; i < eccsteps ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) eccflag = 0; /* initialize eccflag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * Check whether any error is reported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * in case of an error, a non-zero ECC is reported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) for (j = 0; j < actual_eccbytes; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (calc_ecc[j] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) eccflag = 1; /* non zero ecc, error present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (eccflag == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (memcmp(calc_ecc, erased_ecc_vec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) actual_eccbytes) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * calc_ecc[] matches the pattern for an ECC of all 0xFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * so this is definitely an erased page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) buf = &data[info->nand.ecc.size * i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * count number of 0-bits in read_buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * This check can be removed once a similar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * check is introduced in generic NAND driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) bitflip_count = erased_sector_bitflips(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) buf, read_ecc, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (bitflip_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * The number of 0-bits is within the ECC limits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * so this may be an erased page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) stat += bitflip_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * Too many 0-bits. It may be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * - programmed page, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * - erased page with many bit-flips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * So this page requires a check by the ELM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) err_vec[i].error_reported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) is_error_reported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /* Update the ecc vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) calc_ecc += ecc->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) read_ecc += ecc->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* Check if any error reported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (!is_error_reported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* Decode BCH error using ELM module */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) for (i = 0; i < eccsteps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (err_vec[i].error_uncorrectable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) dev_err(&info->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) "uncorrectable bit-flips found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) err = -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) } else if (err_vec[i].error_reported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) for (j = 0; j < err_vec[i].error_count; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) switch (info->ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /* Add 4 bits to take care of padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) pos = err_vec[i].error_loc[j] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) BCH4_BIT_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) case OMAP_ECC_BCH16_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) pos = err_vec[i].error_loc[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
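/*
 * The ELM reports error locations as bit offsets counted from the end
 * of the data + ECC stream, hence the (error_max - pos - 1) conversion
 * to a byte position below.
 */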
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) error_max = (ecc->size + actual_eccbytes) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* Calculate bit position of error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) bit_pos = pos % 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /* Calculate byte position of error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) byte_pos = (error_max - pos - 1) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (pos < error_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (byte_pos < 512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) pr_debug("bitflip@dat[%d]=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) byte_pos, data[byte_pos]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) data[byte_pos] ^= 1 << bit_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) pr_debug("bitflip@oob[%d]=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) (byte_pos - 512),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) spare_ecc[byte_pos - 512]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) spare_ecc[byte_pos - 512] ^=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 1 << bit_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) dev_err(&info->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) "invalid bit-flip @ %d:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) byte_pos, bit_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) err = -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /* Update number of correctable errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) stat = max_t(unsigned int, stat, err_vec[i].error_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* Update page data with sector size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) data += ecc->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) spare_ecc += ecc->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return (err) ? err : stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * omap_write_page_bch - BCH ecc based write page function for entire page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * @chip: nand chip info structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * @buf: data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * @oob_required: must write chip->oob_poi to OOB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * @page: page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * Custom write page method, evolved to support multi-sector writing in one shot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int oob_required, int page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct mtd_info *mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) uint8_t *ecc_calc = chip->ecc.calc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) nand_prog_page_begin_op(chip, page, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* Enable GPMC ecc engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) chip->ecc.hwctl(chip, NAND_ECC_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /* Write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) chip->legacy.write_buf(chip, buf, mtd->writesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /* Update ecc vector from GPMC result registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) chip->ecc.total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /* Write ecc vector to OOB area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return nand_prog_page_end_op(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * omap_write_subpage_bch - BCH hardware ECC based subpage write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * @chip: nand chip info structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * @offset: column address of subpage within the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * @data_len: data length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * @buf: data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * @oob_required: must write chip->oob_poi to OOB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * @page: page number to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) * OMAP optimized subpage write method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) u32 data_len, const u8 *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) int oob_required, int page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) struct mtd_info *mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) u8 *ecc_calc = chip->ecc.calc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) int ecc_size = chip->ecc.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) int ecc_bytes = chip->ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) int ecc_steps = chip->ecc.steps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) u32 start_step = offset / ecc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) u32 end_step = (offset + data_len - 1) / ecc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) int step, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
/*
* Write the entire page in one go, which is optimal since the ECC is
* calculated by hardware.
* ECC is calculated for all subpages, but we keep only the parts we need.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) nand_prog_page_begin_op(chip, page, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* Enable GPMC ECC engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) chip->ecc.hwctl(chip, NAND_ECC_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /* Write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) chip->legacy.write_buf(chip, buf, mtd->writesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) for (step = 0; step < ecc_steps; step++) {
/* Mask the ECC of untouched subpages by padding with 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (step < start_step || step > end_step)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) memset(ecc_calc, 0xff, ecc_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) buf += ecc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) ecc_calc += ecc_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
/*
* Copy the calculated ECC for the whole page to chip->oob_poi.
* This includes the masked value (0xFF) for unwritten subpages.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ecc_calc = chip->ecc.calc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) chip->ecc.total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /* write OOB buffer to NAND device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return nand_prog_page_end_op(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * omap_read_page_bch - BCH ecc based page read function for entire page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * @chip: nand chip info structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * @buf: buffer to store read data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * @oob_required: caller requires OOB data read to chip->oob_poi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * @page: page number to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) *
* For the BCH ECC scheme, the GPMC is used for syndrome calculation and the
* ELM module for error correction.
* Custom method developed to support ELM error correction and multi-sector
* reads: the page data area is read together with the OOB data with the ECC
* engine enabled, and the ECC vector is updated after the OOB data has been
* read. For error-free pages the ECC vector is reported as zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) int oob_required, int page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct mtd_info *mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) uint8_t *ecc_calc = chip->ecc.calc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) uint8_t *ecc_code = chip->ecc.code_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) int stat, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) unsigned int max_bitflips = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) nand_read_page_op(chip, page, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /* Enable GPMC ecc engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) chip->ecc.hwctl(chip, NAND_ECC_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* Read data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) chip->legacy.read_buf(chip, buf, mtd->writesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) /* Read oob bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) nand_change_read_column_op(chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) mtd->writesize + BADBLOCK_MARKER_LENGTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) chip->oob_poi + BADBLOCK_MARKER_LENGTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) chip->ecc.total, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /* Calculate ecc bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) chip->ecc.total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (stat < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) mtd->ecc_stats.failed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) mtd->ecc_stats.corrected += stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) max_bitflips = max_t(unsigned int, max_bitflips, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return max_bitflips;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * is_elm_present - checks for presence of ELM module by scanning DT nodes
* @info: NAND device structure containing platform data
* @elm_node: ELM device tree node handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static bool is_elm_present(struct omap_nand_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) struct device_node *elm_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /* check whether elm-id is passed via DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (!elm_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) pdev = of_find_device_by_node(elm_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /* check whether ELM device is registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) dev_err(&info->pdev->dev, "ELM device not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
/* ELM module is available, record its device for later configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) info->elm_dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
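/**
* omap2_nand_ecc_check - check that the selected ECC scheme is usable
* @info: NAND device structure containing platform data
*
* Verifies that the kernel configuration (software BCH and/or OMAP hardware
* BCH support) and, for the hardware BCH schemes, the ELM module required by
* the ECC scheme chosen in the device tree are actually available.
*/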
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static bool omap2_nand_ecc_check(struct omap_nand_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) switch (info->ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ecc_needs_omap_bch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) ecc_needs_bch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ecc_needs_elm = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) case OMAP_ECC_BCH16_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ecc_needs_omap_bch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ecc_needs_bch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ecc_needs_elm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) ecc_needs_omap_bch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) ecc_needs_bch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ecc_needs_elm = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) dev_err(&info->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) dev_err(&info->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) "CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) dev_err(&info->pdev->dev, "ELM not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
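/* Data transfer mode names accepted by the "ti,nand-xfer-type" DT property */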
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) static const char * const nand_xfer_types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) [NAND_OMAP_POLLED] = "polled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
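/**
* omap_get_dt_info - parse NAND chip properties from the device tree
* @dev: device of the GPMC child node describing the NAND chip
* @info: NAND device structure to be filled in
*
* Reads the GPMC chip-select ("reg"), the optional ELM phandle ("ti,elm-id"),
* the ECC scheme ("ti,nand-ecc-opt") and the optional data transfer mode
* ("ti,nand-xfer-type") from the device tree.
*
* Illustrative child node of the GPMC (node and phandle names are examples
* only):
*
*	nand@0,0 {
*		reg = <0 0 4>;
*		ti,nand-ecc-opt = "bch8";
*		ti,elm-id = <&elm>;
*		ti,nand-xfer-type = "prefetch-dma";
*	};
*/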
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct device_node *child = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) const char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) u32 cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (of_property_read_u32(child, "reg", &cs) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) dev_err(dev, "reg not found in DT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) info->gpmc_cs = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /* detect availability of ELM module. Won't be present pre-OMAP4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (!info->elm_of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (!info->elm_of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) dev_dbg(dev, "ti,elm-id not in DT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* select ecc-scheme for NAND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) dev_err(dev, "ti,nand-ecc-opt not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!strcmp(s, "sw")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else if (!strcmp(s, "ham1") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) } else if (!strcmp(s, "bch4")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (info->elm_of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) } else if (!strcmp(s, "bch8")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (info->elm_of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) } else if (!strcmp(s, "bch16")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /* select data transfer mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (!strcasecmp(s, nand_xfer_types[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) info->xfer_type = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
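/*
* OOB layout used with the hardware ECC schemes: all ECC bytes are packed
* into a single region starting right after the bad block marker (or at
* offset 1 for HAM1 on 8-bit devices); the rest of the OOB area is free.
*/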
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct mtd_oob_region *oobregion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct omap_nand_info *info = mtd_to_omap(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) struct nand_chip *chip = &info->nand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) int off = BADBLOCK_MARKER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) !(chip->options & NAND_BUSWIDTH_16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) off = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (section)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) oobregion->offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) oobregion->length = chip->ecc.total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static int omap_ooblayout_free(struct mtd_info *mtd, int section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct mtd_oob_region *oobregion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct omap_nand_info *info = mtd_to_omap(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct nand_chip *chip = &info->nand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) int off = BADBLOCK_MARKER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) !(chip->options & NAND_BUSWIDTH_16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) off = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (section)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) off += chip->ecc.total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (off >= mtd->oobsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) oobregion->offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) oobregion->length = mtd->oobsize - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) .ecc = omap_ooblayout_ecc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) .free = omap_ooblayout_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
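/*
* OOB layout used when error correction is done in software: each ECC step
* gets its own section of chip->ecc.bytes ECC bytes followed by one OMAP
* specific marker byte; the remaining OOB bytes are reported as free.
*/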
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct mtd_oob_region *oobregion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct nand_chip *chip = mtd_to_nand(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) int off = BADBLOCK_MARKER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (section >= chip->ecc.steps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * When SW correction is employed, one OMAP specific marker byte is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * reserved after each ECC step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) oobregion->offset = off + (section * (chip->ecc.bytes + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) oobregion->length = chip->ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) struct mtd_oob_region *oobregion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct nand_chip *chip = mtd_to_nand(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) int off = BADBLOCK_MARKER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (section)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * When SW correction is employed, one OMAP specific marker byte is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * reserved after each ECC step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (off >= mtd->oobsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) oobregion->offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) oobregion->length = mtd->oobsize - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) .ecc = omap_sw_ooblayout_ecc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) .free = omap_sw_ooblayout_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
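/**
* omap_nand_attach_chip - finish controller setup once the NAND is identified
* @chip: NAND chip structure
*
* Called by nand_scan(). Selects the low-level read/write callbacks (and
* requests DMA channels or GPMC IRQs where needed) according to the
* configured transfer mode, populates the ECC parameters, callbacks and OOB
* layout for the selected ECC scheme, and checks that the OOB area is large
* enough to hold the ECC bytes.
*/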
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static int omap_nand_attach_chip(struct nand_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) struct mtd_info *mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct omap_nand_info *info = mtd_to_omap(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct device *dev = &info->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) int min_oobbytes = BADBLOCK_MARKER_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) int oobbytes_per_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (chip->bbt_options & NAND_BBT_USE_FLASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) chip->bbt_options |= NAND_BBT_NO_OOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) chip->options |= NAND_SKIP_BBTSCAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /* Re-populate low-level callbacks based on xfer modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) switch (info->xfer_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) case NAND_OMAP_PREFETCH_POLLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) chip->legacy.read_buf = omap_read_buf_pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) chip->legacy.write_buf = omap_write_buf_pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) case NAND_OMAP_POLLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /* Use nand_base defaults for {read,write}_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) case NAND_OMAP_PREFETCH_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) info->dma = dma_request_chan(dev->parent, "rxtx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (IS_ERR(info->dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) dev_err(dev, "DMA engine request failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return PTR_ERR(info->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct dma_slave_config cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) memset(&cfg, 0, sizeof(cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) cfg.src_addr = info->phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) cfg.dst_addr = info->phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) cfg.src_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) cfg.dst_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) err = dmaengine_slave_config(info->dma, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) "DMA engine slave config failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) chip->legacy.read_buf = omap_read_buf_dma_pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) chip->legacy.write_buf = omap_write_buf_dma_pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) case NAND_OMAP_PREFETCH_IRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (info->gpmc_irq_fifo <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) err = devm_request_irq(dev, info->gpmc_irq_fifo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) omap_nand_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) "gpmc-nand-fifo", info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) dev_err(dev, "Requesting IRQ %d, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) info->gpmc_irq_fifo, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) info->gpmc_irq_fifo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (info->gpmc_irq_count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) err = devm_request_irq(dev, info->gpmc_irq_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) omap_nand_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) "gpmc-nand-count", info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) dev_err(dev, "Requesting IRQ %d, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) info->gpmc_irq_count, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) info->gpmc_irq_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) chip->legacy.read_buf = omap_read_buf_irq_pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) chip->legacy.write_buf = omap_write_buf_irq_pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (!omap2_nand_ecc_check(info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * Bail out earlier to let NAND_ECC_ENGINE_TYPE_SOFT code create its own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * ooblayout instead of using ours.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /* Populate MTD interface based on ECC scheme */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) switch (info->ecc_opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) case OMAP_ECC_HAM1_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) chip->ecc.bytes = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) chip->ecc.size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) chip->ecc.strength = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) chip->ecc.calculate = omap_calculate_ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) chip->ecc.hwctl = omap_enable_hwecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) chip->ecc.correct = omap_correct_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) oobbytes_per_step = chip->ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (!(chip->options & NAND_BUSWIDTH_16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) min_oobbytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) chip->ecc.size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) chip->ecc.bytes = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) chip->ecc.strength = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) chip->ecc.hwctl = omap_enable_hwecc_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) chip->ecc.correct = nand_bch_correct_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) chip->ecc.calculate = omap_calculate_ecc_bch_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /* Reserve one byte for the OMAP marker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) oobbytes_per_step = chip->ecc.bytes + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /* Software BCH library is used for locating errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) chip->ecc.priv = nand_bch_init(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (!chip->ecc.priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) dev_err(dev, "Unable to use BCH library\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) case OMAP_ECC_BCH4_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) chip->ecc.size = 512;
/* One extra ECC byte per step is kept reserved for ROM-code compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) chip->ecc.bytes = 7 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) chip->ecc.strength = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) chip->ecc.hwctl = omap_enable_hwecc_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) chip->ecc.correct = omap_elm_correct_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) chip->ecc.read_page = omap_read_page_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) chip->ecc.write_page = omap_write_page_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) chip->ecc.write_subpage = omap_write_subpage_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) oobbytes_per_step = chip->ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) err = elm_config(info->elm_dev, BCH4_ECC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) mtd->writesize / chip->ecc.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) chip->ecc.size, chip->ecc.bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) chip->ecc.size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) chip->ecc.bytes = 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) chip->ecc.strength = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) chip->ecc.hwctl = omap_enable_hwecc_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) chip->ecc.correct = nand_bch_correct_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) chip->ecc.calculate = omap_calculate_ecc_bch_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) /* Reserve one byte for the OMAP marker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) oobbytes_per_step = chip->ecc.bytes + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) /* Software BCH library is used for locating errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) chip->ecc.priv = nand_bch_init(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (!chip->ecc.priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) dev_err(dev, "unable to use BCH library\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) case OMAP_ECC_BCH8_CODE_HW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) chip->ecc.size = 512;
/* One extra ECC byte per step is kept reserved for ROM-code compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) chip->ecc.bytes = 13 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) chip->ecc.strength = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) chip->ecc.hwctl = omap_enable_hwecc_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) chip->ecc.correct = omap_elm_correct_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) chip->ecc.read_page = omap_read_page_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) chip->ecc.write_page = omap_write_page_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) chip->ecc.write_subpage = omap_write_subpage_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) oobbytes_per_step = chip->ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) err = elm_config(info->elm_dev, BCH8_ECC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) mtd->writesize / chip->ecc.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) chip->ecc.size, chip->ecc.bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) case OMAP_ECC_BCH16_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) chip->ecc.size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) chip->ecc.bytes = 26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) chip->ecc.strength = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) chip->ecc.hwctl = omap_enable_hwecc_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) chip->ecc.correct = omap_elm_correct_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) chip->ecc.read_page = omap_read_page_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) chip->ecc.write_page = omap_write_page_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) chip->ecc.write_subpage = omap_write_subpage_bch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) oobbytes_per_step = chip->ecc.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) err = elm_config(info->elm_dev, BCH16_ECC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) mtd->writesize / chip->ecc.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) chip->ecc.size, chip->ecc.bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) dev_err(dev, "Invalid or unsupported ECC scheme\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
/* Check if the NAND device's OOB is large enough to store the ECC signatures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) min_oobbytes += (oobbytes_per_step *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) (mtd->writesize / chip->ecc.size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (mtd->oobsize < min_oobbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) dev_err(dev,
"Not enough OOB bytes: required = %d, available = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) min_oobbytes, mtd->oobsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) static const struct nand_controller_ops omap_nand_controller_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) .attach_chip = omap_nand_attach_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /* Shared among all NAND instances to synchronize access to the ECC Engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static struct nand_controller omap_gpmc_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) static bool omap_gpmc_controller_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
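/**
* omap_nand_probe - probe a GPMC chip-select with a NAND device attached
* @pdev: platform device for the GPMC NAND child node
*
* Parses the device tree, maps the GPMC NAND I/O window, registers the
* shared GPMC NAND controller on first use, scans the NAND device and
* registers the resulting MTD device.
*/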
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) static int omap_nand_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) struct omap_nand_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) struct mtd_info *mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct nand_chip *nand_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) info->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) err = omap_get_dt_info(dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (!info->ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) nand_chip = &info->nand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) mtd = nand_to_mtd(nand_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) mtd->dev.parent = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) nand_chip->ecc.priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) nand_set_flash_node(nand_chip, dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (!mtd->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) "omap2-nand.%d", info->gpmc_cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!mtd->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) dev_err(&pdev->dev, "Failed to set MTD name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) info->phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (!omap_gpmc_controller_initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) omap_gpmc_controller.ops = &omap_nand_controller_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) nand_controller_init(&omap_gpmc_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) omap_gpmc_controller_initialized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) nand_chip->controller = &omap_gpmc_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) GPIOD_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (IS_ERR(info->ready_gpiod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) dev_err(dev, "failed to get ready gpio\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return PTR_ERR(info->ready_gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
/*
* If the RDY/BSY line is connected to the OMAP, use the OMAP ready
* function and the generic nand_wait function, which reads the status
* register after monitoring the RDY/BSY line. Otherwise use a standard
* chip delay, slightly longer than tR (AC timing) of the NAND device,
* and poll the status register until success or failure is reported.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (info->ready_gpiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) nand_chip->legacy.dev_ready = omap_dev_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) nand_chip->legacy.chip_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) nand_chip->legacy.waitfunc = omap_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) nand_chip->legacy.chip_delay = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (info->flash_bbt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /* scan NAND device connected to chip controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) err = nand_scan(nand_chip, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto return_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) err = mtd_device_register(mtd, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) goto cleanup_nand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) platform_set_drvdata(pdev, mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) cleanup_nand:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) nand_cleanup(nand_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) return_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (!IS_ERR_OR_NULL(info->dma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) dma_release_channel(info->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (nand_chip->ecc.priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) nand_bch_free(nand_chip->ecc.priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) nand_chip->ecc.priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static int omap_nand_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct mtd_info *mtd = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) struct nand_chip *nand_chip = mtd_to_nand(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct omap_nand_info *info = mtd_to_omap(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (nand_chip->ecc.priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) nand_bch_free(nand_chip->ecc.priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) nand_chip->ecc.priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (info->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) dma_release_channel(info->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) ret = mtd_device_unregister(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) nand_cleanup(nand_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) static const struct of_device_id omap_nand_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) { .compatible = "ti,omap2-nand", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) MODULE_DEVICE_TABLE(of, omap_nand_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static struct platform_driver omap_nand_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) .probe = omap_nand_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) .remove = omap_nand_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) .of_match_table = of_match_ptr(omap_nand_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) module_platform_driver(omap_nand_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) MODULE_ALIAS("platform:" DRIVER_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");