// SPDX-License-Identifier: GPL-2.0-only
/*
 * Updated, and converted to a generic GPIO-based driver by Russell King.
 *
 * Written by Ben Dooks <ben@simtec.co.uk>
 * Based on 2.4 version by Mark Whittaker
 *
 * © 2004 Simtec Electronics
 *
 * Device driver for NAND flash that uses a memory mapped interface to
 * read/write the NAND commands and data, and GPIO pins for the control
 * signals (the DT binding refers to this as "GPIO assisted NAND flash").
 */
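
/*
 * Illustration only (not part of the original driver sources): on DT
 * systems a device using this driver is described with the
 * "gpio-control-nand" compatible.  A minimal node might look roughly like
 * the sketch below; the GPIO specifiers, chip-select value and register
 * width are invented for the example, and the binding document remains
 * the authoritative reference for the property names, the GPIO ordering
 * (RDY, nCE, ALE, CLE, nWP) and the optional bank-width, chip-delay and
 * "gpio-control-nand,io-sync-reg" properties parsed later in this file.
 *
 *	nand@1,0 {
 *		compatible = "gpio-control-nand";
 *		reg = <1 0x0000 0x2>;
 *		bank-width = <1>;
 *		gpios = <&bank 1 0>,	(RDY)
 *			<0>,		(nCE, optional)
 *			<&bank 3 0>,	(ALE)
 *			<&bank 4 0>,	(CLE)
 *			<0>;		(nWP, optional)
 *	};
 */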

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>

struct gpiomtd {
	struct nand_controller base;
	void __iomem *io;
	void __iomem *io_sync;
	struct nand_chip nand_chip;
	struct gpio_nand_platdata plat;
	struct gpio_desc *nce; /* Optional chip enable */
	struct gpio_desc *cle;
	struct gpio_desc *ale;
	struct gpio_desc *rdy;
	struct gpio_desc *nwp; /* Optional write protection */
};

static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
}

#ifdef CONFIG_ARM
/* gpio_nand_dosync()
 *
 * Make sure the GPIO state changes occur in-order with writes to the NAND
 * memory region.
 * Needed on PXA due to bus-reordering within the SoC itself (see the section
 * on I/O ordering in the PXA manual, section 2.3, p. 35).
 */
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
	unsigned long tmp;

	if (gpiomtd->io_sync) {
		/*
		 * Linux memory barriers don't cater for what's required here.
		 * What's required is what's here - a read from a separate
		 * region with a dependency on that read.
		 */
		tmp = readl(gpiomtd->io_sync);
		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
	}
}
#else
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif

static int gpio_nand_exec_instr(struct nand_chip *chip,
				const struct nand_op_instr *instr)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
	unsigned int i;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->cle, 1);
		gpio_nand_dosync(gpiomtd);
		writeb(instr->ctx.cmd.opcode, gpiomtd->io);
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->cle, 0);
		return 0;

	case NAND_OP_ADDR_INSTR:
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->ale, 1);
		gpio_nand_dosync(gpiomtd);
		for (i = 0; i < instr->ctx.addr.naddrs; i++)
			writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->ale, 0);
		return 0;

	case NAND_OP_DATA_IN_INSTR:
		gpio_nand_dosync(gpiomtd);
		if ((chip->options & NAND_BUSWIDTH_16) &&
		    !instr->ctx.data.force_8bit)
			ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
				     instr->ctx.data.len / 2);
		else
			ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
				    instr->ctx.data.len);
		return 0;

	case NAND_OP_DATA_OUT_INSTR:
		gpio_nand_dosync(gpiomtd);
		if ((chip->options & NAND_BUSWIDTH_16) &&
		    !instr->ctx.data.force_8bit)
			iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
				      instr->ctx.data.len / 2);
		else
			iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
				     instr->ctx.data.len);
		return 0;

	case NAND_OP_WAITRDY_INSTR:
		if (!gpiomtd->rdy)
			return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);

		return nand_gpio_waitrdy(chip, gpiomtd->rdy,
					 instr->ctx.waitrdy.timeout_ms);

	default:
		return -EINVAL;
	}

	return 0;
}
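
/*
 * Illustration only: the raw NAND core drives this controller through
 * gpio_nand_exec_op() below, which walks the instruction array of a
 * struct nand_operation and hands each entry to gpio_nand_exec_instr()
 * above.  As a rough sketch, a READ ID issued by the core arrives as an
 * instruction sequence along these lines (built with the generic helper
 * macros from <linux/mtd/rawnand.h>; the delays are chosen by the core
 * from the chip timings, so the zeroes here are purely illustrative):
 *
 *	const struct nand_op_instr instrs[] = {
 *		NAND_OP_CMD(NAND_CMD_READID, 0),
 *		NAND_OP_ADDR(1, &addr, 0),
 *		NAND_OP_8BIT_DATA_IN(len, id_buf, 0),
 *	};
 *
 * i.e. one CLE-wrapped command write, one ALE-wrapped address cycle and a
 * byte-wide data-in burst, matching the cases handled above.
 */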

static int gpio_nand_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
	unsigned int i;
	int ret = 0;

	if (check_only)
		return 0;

	gpio_nand_dosync(gpiomtd);
	gpiod_set_value(gpiomtd->nce, 0);
	for (i = 0; i < op->ninstrs; i++) {
		ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
		if (ret)
			break;

		if (op->instrs[i].delay_ns)
			ndelay(op->instrs[i].delay_ns);
	}
	gpio_nand_dosync(gpiomtd);
	gpiod_set_value(gpiomtd->nce, 1);

	return ret;
}

static int gpio_nand_attach_chip(struct nand_chip *chip)
{
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

	return 0;
}

static const struct nand_controller_ops gpio_nand_ops = {
	.exec_op = gpio_nand_exec_op,
	.attach_chip = gpio_nand_attach_chip,
};

#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_id_table[] = {
	{ .compatible = "gpio-control-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_nand_id_table);

static int gpio_nand_get_config_of(const struct device *dev,
				   struct gpio_nand_platdata *plat)
{
	u32 val;

	if (!dev->of_node)
		return -ENODEV;

	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
		if (val == 2) {
			plat->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
		plat->chip_delay = val;

	return 0;
}

static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	struct resource *r;
	u64 addr;

	if (of_property_read_u64(pdev->dev.of_node,
				 "gpio-control-nand,io-sync-reg", &addr))
		return NULL;

	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = addr;
	r->end = r->start + 0x3;
	r->flags = IORESOURCE_MEM;

	return r;
}
#else /* CONFIG_OF */
static inline int gpio_nand_get_config_of(const struct device *dev,
					  struct gpio_nand_platdata *plat)
{
	return -ENOSYS;
}

static inline struct resource *
gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	return NULL;
}
#endif /* CONFIG_OF */

static inline int gpio_nand_get_config(const struct device *dev,
				       struct gpio_nand_platdata *plat)
{
	int ret = gpio_nand_get_config_of(dev, plat);

	if (!ret)
		return ret;

	if (dev_get_platdata(dev)) {
		memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
		return 0;
	}

	return -EINVAL;
}
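
/*
 * Illustration only: on non-DT platforms the board code is expected to
 * register a "gpio-nand" platform device that carries a struct
 * gpio_nand_platdata (see <linux/mtd/nand-gpio.h>), the MMIO resource for
 * the data register, an optional second resource for the io-sync region,
 * and a gpiod lookup table providing the "nce", "ale", "cle", "rdy" and
 * "nwp" lines.  A hypothetical board file might contain something along
 * these lines (the partition layout and chip delay are invented for the
 * example and must match the real hardware):
 *
 *	static struct mtd_partition board_nand_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "root", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static struct gpio_nand_platdata board_nand_pdata = {
 *		.parts = board_nand_parts,
 *		.num_parts = ARRAY_SIZE(board_nand_parts),
 *		.chip_delay = 25,
 *	};
 */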

static inline struct resource *
gpio_nand_get_io_sync(struct platform_device *pdev)
{
	struct resource *r = gpio_nand_get_io_sync_of(pdev);

	if (r)
		return r;

	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}

static int gpio_nand_remove(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
	struct nand_chip *chip = &gpiomtd->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	/* Enable write protection and disable the chip */
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_set_value(gpiomtd->nwp, 0);
	if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
		gpiod_set_value(gpiomtd->nce, 0);

	return 0;
}

static int gpio_nand_probe(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	struct resource *res;
	struct device *dev = &pdev->dev;
	int ret = 0;

	if (!dev->of_node && !dev_get_platdata(dev))
		return -EINVAL;

	gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
	if (!gpiomtd)
		return -ENOMEM;

	chip = &gpiomtd->nand_chip;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpiomtd->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(gpiomtd->io))
		return PTR_ERR(gpiomtd->io);

	res = gpio_nand_get_io_sync(pdev);
	if (res) {
		gpiomtd->io_sync = devm_ioremap_resource(dev, res);
		if (IS_ERR(gpiomtd->io_sync))
			return PTR_ERR(gpiomtd->io_sync);
	}

	ret = gpio_nand_get_config(dev, &gpiomtd->plat);
	if (ret)
		return ret;

	/* Just enable the chip */
	gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
	if (IS_ERR(gpiomtd->nce))
		return PTR_ERR(gpiomtd->nce);

	/* We disable write protection once we know probe() will succeed */
	gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->nwp)) {
		ret = PTR_ERR(gpiomtd->nwp);
		goto out_ce;
	}

	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->ale)) {
		ret = PTR_ERR(gpiomtd->ale);
		goto out_ce;
	}

	gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->cle)) {
		ret = PTR_ERR(gpiomtd->cle);
		goto out_ce;
	}

	gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
	if (IS_ERR(gpiomtd->rdy)) {
		ret = PTR_ERR(gpiomtd->rdy);
		goto out_ce;
	}

	nand_controller_init(&gpiomtd->base);
	gpiomtd->base.ops = &gpio_nand_ops;

	nand_set_flash_node(chip, pdev->dev.of_node);
	chip->options = gpiomtd->plat.options;
	chip->controller = &gpiomtd->base;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = dev;

	platform_set_drvdata(pdev, gpiomtd);

	/* Disable write protection, if wired up */
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_direction_output(gpiomtd->nwp, 1);

	/*
	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
	 * Set ->engine_type before registering the NAND devices in order to
	 * provide a driver specific default value.
	 */
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;

	ret = nand_scan(chip, 1);
	if (ret)
		goto err_wp;

	if (gpiomtd->plat.adjust_parts)
		gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);

	ret = mtd_device_register(mtd, gpiomtd->plat.parts,
				  gpiomtd->plat.num_parts);
	if (!ret)
		return 0;

err_wp:
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_set_value(gpiomtd->nwp, 0);
out_ce:
	if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
		gpiod_set_value(gpiomtd->nce, 0);

	return ret;
}

static struct platform_driver gpio_nand_driver = {
	.probe = gpio_nand_probe,
	.remove = gpio_nand_remove,
	.driver = {
		.name = "gpio-nand",
		.of_match_table = of_match_ptr(gpio_nand_id_table),
	},
};

module_platform_driver(gpio_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("GPIO NAND Driver");