// SPDX-License-Identifier: GPL-2.0
/*
 * Arasan NAND Flash Controller Driver
 *
 * Copyright (C) 2014 - 2020 Xilinx, Inc.
 * Author:
 *   Miquel Raynal <miquel.raynal@bootlin.com>
 * Original work (fully rewritten):
 *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
 *   Naga Sureshkumar Relli <nagasure@xilinx.com>
 */

#include <linux/bch.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PKT_REG			0x00
#define PKT_SIZE(x)		FIELD_PREP(GENMASK(10, 0), (x))
#define PKT_STEPS(x)		FIELD_PREP(GENMASK(23, 12), (x))

#define MEM_ADDR1_REG		0x04

#define MEM_ADDR2_REG		0x08
#define ADDR2_STRENGTH(x)	FIELD_PREP(GENMASK(27, 25), (x))
#define ADDR2_CS(x)		FIELD_PREP(GENMASK(31, 30), (x))

#define CMD_REG			0x0C
#define CMD_1(x)		FIELD_PREP(GENMASK(7, 0), (x))
#define CMD_2(x)		FIELD_PREP(GENMASK(15, 8), (x))
#define CMD_PAGE_SIZE(x)	FIELD_PREP(GENMASK(25, 23), (x))
#define CMD_DMA_ENABLE		BIT(27)
#define CMD_NADDRS(x)		FIELD_PREP(GENMASK(30, 28), (x))
#define CMD_ECC_ENABLE		BIT(31)

#define PROG_REG		0x10
#define PROG_PGRD		BIT(0)
#define PROG_ERASE		BIT(2)
#define PROG_STATUS		BIT(3)
#define PROG_PGPROG		BIT(4)
#define PROG_RDID		BIT(6)
#define PROG_RDPARAM		BIT(7)
#define PROG_RST		BIT(8)
#define PROG_GET_FEATURE	BIT(9)
#define PROG_SET_FEATURE	BIT(10)

#define INTR_STS_EN_REG		0x14
#define INTR_SIG_EN_REG		0x18
#define INTR_STS_REG		0x1C
#define WRITE_READY		BIT(0)
#define READ_READY		BIT(1)
#define XFER_COMPLETE		BIT(2)
#define DMA_BOUNDARY		BIT(6)
#define EVENT_MASK		GENMASK(7, 0)

#define READY_STS_REG		0x20

#define DMA_ADDR0_REG		0x50
#define DMA_ADDR1_REG		0x24

#define FLASH_STS_REG		0x28

#define DATA_PORT_REG		0x30

#define ECC_CONF_REG		0x34
#define ECC_CONF_COL(x)		FIELD_PREP(GENMASK(15, 0), (x))
#define ECC_CONF_LEN(x)		FIELD_PREP(GENMASK(26, 16), (x))
#define ECC_CONF_BCH_EN		BIT(27)

#define ECC_ERR_CNT_REG		0x38
#define GET_PKT_ERR_CNT(x)	FIELD_GET(GENMASK(7, 0), (x))
#define GET_PAGE_ERR_CNT(x)	FIELD_GET(GENMASK(16, 8), (x))

#define ECC_SP_REG		0x3C
#define ECC_SP_CMD1(x)		FIELD_PREP(GENMASK(7, 0), (x))
#define ECC_SP_CMD2(x)		FIELD_PREP(GENMASK(15, 8), (x))
#define ECC_SP_ADDRS(x)		FIELD_PREP(GENMASK(30, 28), (x))

#define ECC_1ERR_CNT_REG	0x40
#define ECC_2ERR_CNT_REG	0x44

#define DATA_INTERFACE_REG	0x6C
#define DIFACE_SDR_MODE(x)	FIELD_PREP(GENMASK(2, 0), (x))
#define DIFACE_DDR_MODE(x)	FIELD_PREP(GENMASK(5, 3), (x))
#define DIFACE_SDR		0
#define DIFACE_NVDDR		BIT(9)

#define ANFC_MAX_CS		2
#define ANFC_DFLT_TIMEOUT_US	1000000
#define ANFC_MAX_CHUNK_SIZE	SZ_1M
#define ANFC_MAX_PARAM_SIZE	SZ_4K
#define ANFC_MAX_STEPS		SZ_2K
#define ANFC_MAX_PKT_SIZE	(SZ_2K - 1)
#define ANFC_MAX_ADDR_CYC	5U
#define ANFC_RSVD_ECC_BYTES	21

#define ANFC_XLNX_SDR_DFLT_CORE_CLK	100000000
#define ANFC_XLNX_SDR_HS_CORE_CLK	80000000

/**
 * struct anfc_op - Defines how to execute an operation
 * @pkt_reg: Packet register
 * @addr1_reg: Memory address 1 register
 * @addr2_reg: Memory address 2 register
 * @cmd_reg: Command register
 * @prog_reg: Program register
 * @steps: Number of "packets" to read/write
 * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
 * @len: Data transfer length
 * @read: Data transfer direction from the controller point of view
 * @buf: Data buffer
 */
struct anfc_op {
	u32 pkt_reg;
	u32 addr1_reg;
	u32 addr2_reg;
	u32 cmd_reg;
	u32 prog_reg;
	unsigned int steps;
	unsigned int rdy_timeout_ms;
	unsigned int len;
	bool read;
	u8 *buf;
};

/**
 * struct anand - Defines the NAND chip related information
 * @node: Used to store NAND chips into a list
 * @chip: NAND chip information structure
 * @cs: Chip select line
 * @rb: Ready-busy line
 * @page_sz: Register value of the page_sz field to use
 * @clk: Expected clock frequency to use
 * @timings: Data interface timing mode to use
 * @ecc_conf: Hardware ECC configuration value
 * @strength: Register value of the ECC strength
 * @raddr_cycles: Row address cycle information
 * @caddr_cycles: Column address cycle information
 * @ecc_bits: Exact number of ECC bits per syndrome
 * @ecc_total: Total number of ECC bytes
 * @errloc: Array of errors located with soft BCH
 * @hw_ecc: Buffer to store syndromes computed by hardware
 * @bch: BCH structure
 */
struct anand {
	struct list_head node;
	struct nand_chip chip;
	unsigned int cs;
	unsigned int rb;
	unsigned int page_sz;
	unsigned long clk;
	u32 timings;
	u32 ecc_conf;
	u32 strength;
	u16 raddr_cycles;
	u16 caddr_cycles;
	unsigned int ecc_bits;
	unsigned int ecc_total;
	unsigned int *errloc;
	u8 *hw_ecc;
	struct bch_control *bch;
};

/**
 * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
 * @dev: Pointer to the device structure
 * @base: Remapped register area
 * @controller_clk: Pointer to the system clock
 * @bus_clk: Pointer to the flash clock
 * @controller: Base controller structure
 * @chips: List of all NAND chips attached to the controller
 * @assigned_cs: Bitmask describing already assigned CS lines
 * @cur_clk: Current clock rate
 */
struct arasan_nfc {
	struct device *dev;
	void __iomem *base;
	struct clk *controller_clk;
	struct clk *bus_clk;
	struct nand_controller controller;
	struct list_head chips;
	unsigned long assigned_cs;
	unsigned int cur_clk;
};

static struct anand *to_anand(struct nand_chip *nand)
{
	return container_of(nand, struct anand, chip);
}

static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct arasan_nfc, controller);
}

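/* Poll the interrupt status register until @event is set, then acknowledge it */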
static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
{
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
					 val & event, 0,
					 ANFC_DFLT_TIMEOUT_US);
	if (ret) {
		dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
		return -ETIMEDOUT;
	}

	writel_relaxed(event, nfc->base + INTR_STS_REG);

	return 0;
}

static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
			    unsigned int timeout_ms)
{
	struct anand *anand = to_anand(chip);
	u32 val;
	int ret;

	/* There is no R/B interrupt, we must poll a register */
	ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
					 val & BIT(anand->rb),
					 1, timeout_ms * 1000);
	if (ret) {
		dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
			readl_relaxed(nfc->base + READY_STS_REG));
		return -ETIMEDOUT;
	}

	return 0;
}

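/*
 * Program all the registers describing the operation. The PROG register,
 * which selects the operation type, is written last: this is the write that
 * actually triggers the operation.
 */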
static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
{
	writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
	writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
	writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
	writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
	writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
}

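/*
 * Split a transfer of @len bytes into a power-of-two number of packets that
 * each fit in ANFC_MAX_PKT_SIZE. For instance, a 4096-byte transfer becomes
 * 4 packets of 1024 bytes. Lengths that cannot be split evenly are rejected.
 */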
static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
			       unsigned int *pktsize)
{
	unsigned int nb, sz;

	for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
		sz = len / nb;
		if (sz <= ANFC_MAX_PKT_SIZE)
			break;
	}

	if (sz * nb != len)
		return -ENOTSUPP;

	if (steps)
		*steps = nb;

	if (pktsize)
		*pktsize = sz;

	return 0;
}

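/*
 * Apply the data interface timings of the given target and, if it differs
 * from the current setting, update the controller clock rate.
 */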
static int anfc_select_target(struct nand_chip *chip, int target)
{
	struct anand *anand = to_anand(chip);
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	int ret;

	/* Update the controller timings and the potential ECC configuration */
	writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);

	/* Update clock frequency */
	if (nfc->cur_clk != anand->clk) {
		clk_disable_unprepare(nfc->controller_clk);
		ret = clk_set_rate(nfc->controller_clk, anand->clk);
		if (ret) {
			dev_err(nfc->dev, "Failed to change clock rate\n");
			return ret;
		}

		ret = clk_prepare_enable(nfc->controller_clk);
		if (ret) {
			dev_err(nfc->dev,
				"Failed to re-enable the controller clock\n");
			return ret;
		}

		nfc->cur_clk = anand->clk;
	}

	return 0;
}

/*
 * When using the embedded hardware ECC engine, the controller is in charge of
 * feeding the engine with, first, the ECC residue present in the data array.
 * A typical read operation is:
 * 1/ Assert the read operation by sending the relevant command/address cycles
 *    but targeting the column of the first ECC bytes in the OOB area instead
 *    of the main data directly.
 * 2/ After having read the relevant number of ECC bytes, the controller uses
 *    the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
 *    Register" to move the pointer back to the beginning of the main data.
 * 3/ It will read the content of the main area for a given size (pktsize) and
 *    will feed the ECC engine with this buffer again.
 * 4/ The ECC engine derives the ECC bytes for the given data and compares them
 *    with the ones already received. It eventually triggers status flags and
 *    then sets the "Buffer Read Ready" flag.
 * 5/ The corrected data is then available for reading from the data port
 *    register.
 *
 * The hardware BCH ECC engine is known to be inconsistent in BCH mode and
 * never reports uncorrectable errors. Because of this bug, we have to use the
 * software BCH implementation in the read path.
 */
static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
				 int oob_required, int page)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct anand *anand = to_anand(chip);
	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
	unsigned int max_bitflips = 0;
	dma_addr_t dma_addr;
	int step, ret;
	struct anfc_op nfc_op = {
		.pkt_reg =
			PKT_SIZE(chip->ecc.size) |
			PKT_STEPS(chip->ecc.steps),
		.addr1_reg =
			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
		.addr2_reg =
			((page >> 16) & 0xFF) |
			ADDR2_STRENGTH(anand->strength) |
			ADDR2_CS(anand->cs),
		.cmd_reg =
			CMD_1(NAND_CMD_READ0) |
			CMD_2(NAND_CMD_READSTART) |
			CMD_PAGE_SIZE(anand->page_sz) |
			CMD_DMA_ENABLE |
			CMD_NADDRS(anand->caddr_cycles +
				   anand->raddr_cycles),
		.prog_reg = PROG_PGRD,
	};

	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(nfc->dev, dma_addr)) {
		dev_err(nfc->dev, "Buffer mapping error");
		return -EIO;
	}

	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);

	anfc_trigger_op(nfc, &nfc_op);

	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
	dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
	if (ret) {
		dev_err(nfc->dev, "Error reading page %d\n", page);
		return ret;
	}

	/* Store the raw OOB bytes as well */
	ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
					 mtd->oobsize, 0);
	if (ret)
		return ret;

	/*
	 * For each step, compute the BCH syndrome over the raw data in
	 * software and compare the number of errors found with the hardware
	 * engine feedback.
	 */
	for (step = 0; step < chip->ecc.steps; step++) {
		u8 *raw_buf = &buf[step * chip->ecc.size];
		unsigned int bit, byte;
		int bf, i;

		/* Extract the syndrome, it is not necessarily aligned */
		memset(anand->hw_ecc, 0, chip->ecc.bytes);
		nand_extract_bits(anand->hw_ecc, 0,
				  &chip->oob_poi[mtd->oobsize - anand->ecc_total],
				  anand->ecc_bits * step, anand->ecc_bits);

		bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
				anand->hw_ecc, NULL, NULL, anand->errloc);
		if (!bf) {
			continue;
		} else if (bf > 0) {
			for (i = 0; i < bf; i++) {
				/* Only correct the data, not the syndrome */
				if (anand->errloc[i] < (chip->ecc.size * 8)) {
					bit = BIT(anand->errloc[i] & 7);
					byte = anand->errloc[i] >> 3;
					raw_buf[byte] ^= bit;
				}
			}

			mtd->ecc_stats.corrected += bf;
			max_bitflips = max_t(unsigned int, max_bitflips, bf);

			continue;
		}

		bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
						 NULL, 0, NULL, 0,
						 chip->ecc.strength);
		if (bf > 0) {
			mtd->ecc_stats.corrected += bf;
			max_bitflips = max_t(unsigned int, max_bitflips, bf);
			memset(raw_buf, 0xFF, chip->ecc.size);
		} else if (bf < 0) {
			mtd->ecc_stats.failed++;
		}
	}

	return max_bitflips;
}

static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
				     int oob_required, int page)
{
	int ret;

	ret = anfc_select_target(chip, chip->cur_cs);
	if (ret)
		return ret;

	return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
}

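/*
 * Unlike the read path, the write path can use the hardware ECC engine
 * directly: the ECC configuration and spare command registers are set up,
 * then the page is programmed over DMA with CMD_ECC_ENABLE so the controller
 * computes and stores the ECC bytes itself.
 */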
static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
				  int oob_required, int page)
{
	struct anand *anand = to_anand(chip);
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
	dma_addr_t dma_addr;
	int ret;
	struct anfc_op nfc_op = {
		.pkt_reg =
			PKT_SIZE(chip->ecc.size) |
			PKT_STEPS(chip->ecc.steps),
		.addr1_reg =
			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
		.addr2_reg =
			((page >> 16) & 0xFF) |
			ADDR2_STRENGTH(anand->strength) |
			ADDR2_CS(anand->cs),
		.cmd_reg =
			CMD_1(NAND_CMD_SEQIN) |
			CMD_2(NAND_CMD_PAGEPROG) |
			CMD_PAGE_SIZE(anand->page_sz) |
			CMD_DMA_ENABLE |
			CMD_NADDRS(anand->caddr_cycles +
				   anand->raddr_cycles) |
			CMD_ECC_ENABLE,
		.prog_reg = PROG_PGPROG,
	};

	writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
	writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
		       ECC_SP_ADDRS(anand->caddr_cycles),
		       nfc->base + ECC_SP_REG);

	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(nfc->dev, dma_addr)) {
		dev_err(nfc->dev, "Buffer mapping error");
		return -EIO;
	}

	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);

	anfc_trigger_op(nfc, &nfc_op);
	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
	dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
	if (ret) {
		dev_err(nfc->dev, "Error writing page %d\n", page);
		return ret;
	}

	/* Spare data is not protected */
	if (oob_required)
		ret = nand_write_oob_std(chip, page);

	return ret;
}

static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
				      int oob_required, int page)
{
	int ret;

	ret = anfc_select_target(chip, chip->cur_cs);
	if (ret)
		return ret;

	return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
}

/* NAND framework ->exec_op() hooks and related helpers */
static int anfc_parse_instructions(struct nand_chip *chip,
				   const struct nand_subop *subop,
				   struct anfc_op *nfc_op)
{
	struct anand *anand = to_anand(chip);
	const struct nand_op_instr *instr = NULL;
	bool first_cmd = true;
	unsigned int op_id;
	int ret, i;

	memset(nfc_op, 0, sizeof(*nfc_op));
	nfc_op->addr2_reg = ADDR2_CS(anand->cs);
	nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int offset, naddrs, pktsize;
		const u8 *addrs;
		u8 *buf;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd)
				nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
			else
				nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);

			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			nfc_op->cmd_reg |= CMD_NADDRS(naddrs);

			for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
				if (i < 4)
					nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
				else
					nfc_op->addr2_reg |= addrs[i];
			}

			break;
		case NAND_OP_DATA_IN_INSTR:
			nfc_op->read = true;
			fallthrough;
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			buf = instr->ctx.data.buf.in;
			nfc_op->buf = &buf[offset];
			nfc_op->len = nand_subop_get_data_len(subop, op_id);
			ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
						  &pktsize);
			if (ret)
				return ret;

			/*
			 * The number of DATA cycles must be aligned on 4,
			 * which means the controller might read/write more
			 * than requested. This is harmless most of the time
			 * as the extra DATA cycles are discarded in the write
			 * path and the read pointer is adjusted in the read
			 * path.
			 *
			 * FIXME: The core should mark operations where
			 * reading/writing more is allowed so the exec_op()
			 * implementation can take the right decision when the
			 * alignment constraint is not met: adjust the number
			 * of DATA cycles when it's allowed, reject the
			 * operation otherwise.
			 */
			nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
					   PKT_STEPS(nfc_op->steps);
			break;
		case NAND_OP_WAITRDY_INSTR:
			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		}
	}

	return 0;
}

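/*
 * PIO helper: for each packet, wait for the read/write ready flag and move
 * the data 32 bits at a time through the data port register. A trailing
 * chunk smaller than 4 bytes is handled separately, then the helper waits
 * for the transfer-complete event.
 */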
static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
{
	unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
	unsigned int last_len = nfc_op->len % 4;
	unsigned int offset, dir;
	u8 *buf = nfc_op->buf;
	int ret, i;

	for (i = 0; i < nfc_op->steps; i++) {
		dir = nfc_op->read ? READ_READY : WRITE_READY;
		ret = anfc_wait_for_event(nfc, dir);
		if (ret) {
			dev_err(nfc->dev, "PIO %s ready signal not received\n",
				nfc_op->read ? "Read" : "Write");
			return ret;
		}

		offset = i * (dwords * 4);
		if (nfc_op->read)
			ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
				     dwords);
		else
			iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
				      dwords);
	}

	if (last_len) {
		u32 remainder;

		offset = nfc_op->len - last_len;

		if (nfc_op->read) {
			remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
			memcpy(&buf[offset], &remainder, last_len);
		} else {
			memcpy(&remainder, &buf[offset], last_len);
			writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
		}
	}

	return anfc_wait_for_event(nfc, XFER_COMPLETE);
}

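/*
 * Execute a parsed subop that carries a data payload (parameter page read,
 * page read, set feature, page program) in PIO mode: trigger the operation,
 * optionally wait for the R/B transition, then transfer the data.
 */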
static int anfc_misc_data_type_exec(struct nand_chip *chip,
				    const struct nand_subop *subop,
				    u32 prog_reg)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct anfc_op nfc_op = {};
	int ret;

	ret = anfc_parse_instructions(chip, subop, &nfc_op);
	if (ret)
		return ret;

	nfc_op.prog_reg = prog_reg;
	anfc_trigger_op(nfc, &nfc_op);

	if (nfc_op.rdy_timeout_ms) {
		ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
		if (ret)
			return ret;
	}

	return anfc_rw_pio_op(nfc, &nfc_op);
}

static int anfc_param_read_type_exec(struct nand_chip *chip,
				     const struct nand_subop *subop)
{
	return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
}

static int anfc_data_read_type_exec(struct nand_chip *chip,
				    const struct nand_subop *subop)
{
	return anfc_misc_data_type_exec(chip, subop, PROG_PGRD);
}

static int anfc_param_write_type_exec(struct nand_chip *chip,
				      const struct nand_subop *subop)
{
	return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
}

static int anfc_data_write_type_exec(struct nand_chip *chip,
				     const struct nand_subop *subop)
{
	return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
}

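/*
 * Execute a parsed subop without any data payload (reset, erase, status):
 * trigger the operation, wait for the transfer-complete event and, if
 * requested, for the R/B transition.
 */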
static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
				       const struct nand_subop *subop,
				       u32 prog_reg)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct anfc_op nfc_op = {};
	int ret;

	ret = anfc_parse_instructions(chip, subop, &nfc_op);
	if (ret)
		return ret;

	nfc_op.prog_reg = prog_reg;
	anfc_trigger_op(nfc, &nfc_op);

	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
	if (ret)
		return ret;

	if (nfc_op.rdy_timeout_ms)
		ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);

	return ret;
}

static int anfc_status_type_exec(struct nand_chip *chip,
				 const struct nand_subop *subop)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	u32 tmp;
	int ret;

	/* See anfc_check_op() for details about this constraint */
	if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
		return -ENOTSUPP;

	ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
	if (ret)
		return ret;

	tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
	memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);

	return 0;
}

static int anfc_reset_type_exec(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
}

static int anfc_erase_type_exec(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
}

static int anfc_wait_type_exec(struct nand_chip *chip,
			       const struct nand_subop *subop)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct anfc_op nfc_op = {};
	int ret;

	ret = anfc_parse_instructions(chip, subop, &nfc_op);
	if (ret)
		return ret;

	return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
}

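/*
 * Patterns describing the sequences the controller can assemble natively.
 * Each pattern maps a command/address/data/waitrdy sequence onto one of the
 * PROG_* operations handled by the helpers above.
 */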
static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		anfc_param_read_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		anfc_param_write_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
	NAND_OP_PARSER_PATTERN(
		anfc_data_read_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		anfc_data_write_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		anfc_reset_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		anfc_erase_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		anfc_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
	NAND_OP_PARSER_PATTERN(
		anfc_wait_type_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static int anfc_check_op(struct nand_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) const struct nand_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) const struct nand_op_instr *instr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int op_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * The controller abstracts all the NAND operations and does not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * data-only operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * TODO: The nand_op_parser framework should be extended to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * support custom checks on DATA instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) for (op_id = 0; op_id < op->ninstrs; op_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) instr = &op->instrs[op_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) switch (instr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) case NAND_OP_ADDR_INSTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) case NAND_OP_DATA_IN_INSTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) case NAND_OP_DATA_OUT_INSTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * The controller does not allow a CMD+DATA_IN cycle to be performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * manually on the bus by reading data from the data register. Instead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * the controller abstracts the status read behind its own status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * register once a read status operation has been ordered. Hence, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * cannot support any CMD+DATA_IN operation other than a READ STATUS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * TODO: The nand_op_parser() framework should be extended to describe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * fixed patterns instead of open-coding this check here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (op->ninstrs == 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) op->instrs[0].type == NAND_OP_CMD_INSTR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
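/*
 * Entry point for ->exec_op(): check-only requests are validated against the
 * controller constraints without touching the bus, while real operations
 * first select the target and are then dispatched through the pattern parser
 * defined above.
 */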
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) static int anfc_exec_op(struct nand_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) const struct nand_operation *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) bool check_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (check_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return anfc_check_op(chip, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ret = anfc_select_target(chip, op->cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static int anfc_setup_interface(struct nand_chip *chip, int target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) const struct nand_interface_config *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct anand *anand = to_anand(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct arasan_nfc *nfc = to_anfc(chip->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct device_node *np = nfc->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (target < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * with f > 90MHz (default clock is 100MHz) but signals are unstable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * with higher modes. Hence we slightly decrease the clock rate to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * 80MHz when using modes 2-5 with this SoC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) conf->timings.mode >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
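/*
 * Number of ECC bytes needed per step: the BCH code over GF(2^m) uses
 * m * t parity bits for a correction strength t, rounded up to full bytes.
 * Illustrative example: 512-byte steps (m = 13) at strength 8 need
 * 13 * 8 = 104 bits, i.e. 13 ECC bytes per step.
 */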
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) unsigned int bch_gf_mag, ecc_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) switch (step_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) case SZ_512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) bch_gf_mag = 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) case SZ_1K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) bch_gf_mag = 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ecc_bits = bch_gf_mag * strength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return DIV_ROUND_UP(ecc_bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static const int anfc_hw_ecc_1024_strengths[] = {24};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .stepsize = SZ_512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) .strengths = anfc_hw_ecc_512_strengths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) .stepsize = SZ_1K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) .strengths = anfc_hw_ecc_1024_strengths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static const struct nand_ecc_caps anfc_hw_ecc_caps = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .stepinfos = anfc_hw_ecc_step_infos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) .calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
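/*
 * Worked example of the layout computed below, with a hypothetical geometry
 * (2KiB page, 64 bytes of OOB, 512-byte steps, strength 8): steps = 4,
 * ecc_bits = 13 * 8 = 104, ecc->bytes = 13, ecc_total = 52 and
 * ecc_offset = 2048 + 64 - 52 = 2060, so the ECC bytes are packed at the
 * very end of the OOB area.
 */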
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct nand_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct anand *anand = to_anand(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct mtd_info *mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct nand_ecc_ctrl *ecc = &chip->ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) switch (mtd->writesize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) case SZ_512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case SZ_2K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case SZ_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) case SZ_8K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) case SZ_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) switch (ecc->strength) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) anand->strength = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) anand->strength = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) anand->strength = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) anand->strength = 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) switch (ecc->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) case SZ_512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) bch_gf_mag = 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) bch_prim_poly = 0x201b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) case SZ_1K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) bch_gf_mag = 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) bch_prim_poly = 0x4443;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dev_err(nfc->dev, "Unsupported step size %d\n", ecc->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ecc->steps = mtd->writesize / ecc->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ecc->algo = NAND_ECC_ALGO_BCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) anand->ecc_bits = bch_gf_mag * ecc->strength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ECC_CONF_LEN(anand->ecc_total) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ECC_CONF_BCH_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) sizeof(*anand->errloc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!anand->errloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!anand->hw_ecc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* Enforce bit swapping to fit the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (!anand->bch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ecc->read_page = anfc_sel_read_page_hw_ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ecc->write_page = anfc_sel_write_page_hw_ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
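/*
 * Once the NAND chip has been identified, derive the controller-specific
 * parameters from its geometry: the number of column and row address cycles,
 * the encoded page size, and finally the ECC engine configuration.
 */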
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int anfc_attach_chip(struct nand_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct anand *anand = to_anand(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct arasan_nfc *nfc = to_anfc(chip->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct mtd_info *mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (mtd->writesize <= SZ_512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) anand->caddr_cycles = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) anand->caddr_cycles = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (chip->options & NAND_ROW_ADDR_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) anand->raddr_cycles = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) anand->raddr_cycles = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) switch (mtd->writesize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) case 512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) anand->page_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) case 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) anand->page_sz = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) case 2048:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) anand->page_sz = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) case 4096:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) anand->page_sz = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) case 8192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) anand->page_sz = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) case 16384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) anand->page_sz = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* These hooks are valid for all ECC providers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) switch (chip->ecc.engine_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) case NAND_ECC_ENGINE_TYPE_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) case NAND_ECC_ENGINE_TYPE_SOFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) case NAND_ECC_ENGINE_TYPE_ON_DIE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) case NAND_ECC_ENGINE_TYPE_ON_HOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ret = anfc_init_hw_ecc_controller(nfc, chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) chip->ecc.engine_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static void anfc_detach_chip(struct nand_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct anand *anand = to_anand(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (anand->bch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) bch_free(anand->bch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static const struct nand_controller_ops anfc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .exec_op = anfc_exec_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .setup_interface = anfc_setup_interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .attach_chip = anfc_attach_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .detach_chip = anfc_detach_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
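/*
 * Illustrative device tree child node for one NAND chip, matching the
 * properties parsed below (values are placeholders, not taken from a real
 * board):
 *
 *	nand@0 {
 *		reg = <0>;		// single CS, must be < ANFC_MAX_CS
 *		nand-rb = <0>;		// ready/busy line index
 *		label = "main-storage";	// mandatory, becomes the MTD name
 *	};
 */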
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct anand *anand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct nand_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct mtd_info *mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int cs, rb, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!anand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* We do not support multiple CS per chip yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dev_err(nfc->dev, "Invalid reg property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ret = of_property_read_u32(np, "reg", &cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ret = of_property_read_u32(np, "nand-rb", &rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (test_and_set_bit(cs, &nfc->assigned_cs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_err(nfc->dev, "Already assigned CS %d\n", cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) anand->cs = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) anand->rb = rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) chip = &anand->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) mtd = nand_to_mtd(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) mtd->dev.parent = nfc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) chip->controller = &nfc->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) NAND_USES_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) nand_set_flash_node(chip, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (!mtd->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) dev_err(nfc->dev, "NAND label property is mandatory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ret = nand_scan(chip, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) dev_err(nfc->dev, "Scan operation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ret = mtd_device_register(mtd, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) nand_cleanup(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) list_add_tail(&anand->node, &nfc->chips);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static void anfc_chips_cleanup(struct arasan_nfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct anand *anand, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct nand_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) chip = &anand->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ret = mtd_device_unregister(nand_to_mtd(chip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) nand_cleanup(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) list_del(&anand->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int anfc_chips_init(struct arasan_nfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct device_node *np = nfc->dev->of_node, *nand_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int nchips = of_get_child_count(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (!nchips || nchips > ANFC_MAX_CS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) nchips);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) for_each_child_of_node(np, nand_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ret = anfc_chip_init(nfc, nand_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) of_node_put(nand_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) anfc_chips_cleanup(nfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void anfc_reset(struct arasan_nfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* Disable interrupt signals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Enable interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
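/*
 * Probe sequence: map the controller registers, mask the interrupt signals
 * while keeping the status bits enabled (anfc_reset), grab and enable the
 * "controller" and "bus" clocks, then register every NAND chip described in
 * the device tree.
 */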
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static int anfc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct arasan_nfc *nfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) nfc->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) nand_controller_init(&nfc->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) nfc->controller.ops = &anfc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) INIT_LIST_HEAD(&nfc->chips);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) nfc->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (IS_ERR(nfc->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return PTR_ERR(nfc->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) anfc_reset(nfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (IS_ERR(nfc->controller_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return PTR_ERR(nfc->controller_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (IS_ERR(nfc->bus_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return PTR_ERR(nfc->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ret = clk_prepare_enable(nfc->controller_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ret = clk_prepare_enable(nfc->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) goto disable_controller_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ret = anfc_chips_init(nfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) goto disable_bus_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) platform_set_drvdata(pdev, nfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) disable_bus_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) clk_disable_unprepare(nfc->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) disable_controller_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) clk_disable_unprepare(nfc->controller_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static int anfc_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct arasan_nfc *nfc = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) anfc_chips_cleanup(nfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) clk_disable_unprepare(nfc->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) clk_disable_unprepare(nfc->controller_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static const struct of_device_id anfc_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) .compatible = "xlnx,zynqmp-nand-controller",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .compatible = "arasan,nfc-v3p10",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) MODULE_DEVICE_TABLE(of, anfc_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static struct platform_driver anfc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .name = "arasan-nand-controller",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .of_match_table = anfc_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .probe = anfc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .remove = anfc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) module_platform_driver(anfc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");