Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2009 - Maxim Levitsky
 * driver for Ricoh xD readers
 */

#define DRV_NAME "r852"
#define pr_fmt(fmt)  DRV_NAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include "sm_common.h"
#include "r852.h"


static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

/* read register */
static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
{
	uint8_t reg = readb(dev->mmio + address);
	return reg;
}

/* write register */
static inline void r852_write_reg(struct r852_device *dev,
						int address, uint8_t value)
{
	writeb(value, dev->mmio + address);
}


/* read dword sized register */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
	return reg;
}

/* write dword sized register */
static inline void r852_write_reg_dword(struct r852_device *dev,
							int address, uint32_t value)
{
	writel(cpu_to_le32(value), dev->mmio + address);
}

/* returns pointer to our private structure */
static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	return nand_get_controller_data(chip);
}


/* check if controller supports dma */
static void r852_dma_test(struct r852_device *dev)
{
	dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
		(R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);

	if (!dev->dma_usable)
		message("Non dma capable device detected, dma disabled");

	if (!r852_enable_dma) {
		message("disabling dma on user request");
		dev->dma_usable = 0;
	}
}

/*
 * Enable DMA. Enables either the first or the second stage of the DMA;
 * expects dev->dma_dir and dev->dma_state to be set.
 */
static void r852_dma_enable(struct r852_device *dev)
{
	uint8_t dma_reg, dma_irq_reg;

	/* Set up dma settings */
	dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
	dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);

	if (dev->dma_dir)
		dma_reg |= R852_DMA_READ;

	if (dev->dma_state == DMA_INTERNAL) {
		dma_reg |= R852_DMA_INTERNAL;
		/* Precaution to make sure HW doesn't write
		   to random kernel memory */
		r852_write_reg_dword(dev, R852_DMA_ADDR,
			cpu_to_le32(dev->phys_bounce_buffer));
	} else {
		dma_reg |= R852_DMA_MEMORY;
		r852_write_reg_dword(dev, R852_DMA_ADDR,
			cpu_to_le32(dev->phys_dma_addr));
	}

	/* Precaution: make sure write reached the device */
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);

	/* Set dma irq */
	dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
		dma_irq_reg |
		R852_DMA_IRQ_INTERNAL |
		R852_DMA_IRQ_ERROR |
		R852_DMA_IRQ_MEMORY);
}
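
/*
 * Note on the two-stage DMA scheme used above: data moves between the
 * card and the controller's internal buffer in one phase (DMA_INTERNAL)
 * and between that buffer and host memory in the other (DMA_MEMORY).
 * A read does device->internal first, a write does memory->internal
 * first; r852_irq() flips dev->dma_state and re-arms the engine for the
 * second half, so r852_dma_enable() runs once per phase.
 */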

/*
 * Disable DMA; called from the interrupt handler, which reports the
 * success of the operation via the 'error' argument.
 */
static void r852_dma_done(struct r852_device *dev, int error)
{
	WARN_ON(dev->dma_stage == 0);

	r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
			r852_read_reg_dword(dev, R852_DMA_IRQ_STA));

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);

	/* Precaution to make sure HW doesn't write to random kernel memory */
	r852_write_reg_dword(dev, R852_DMA_ADDR,
		cpu_to_le32(dev->phys_bounce_buffer));
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	dev->dma_error = error;
	dev->dma_stage = 0;

	if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
		dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
			R852_DMA_LEN,
			dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

/*
 * Wait till the DMA is done; this includes both phases of it.
 */
static int r852_dma_wait(struct r852_device *dev)
{
	long timeout = wait_for_completion_timeout(&dev->dma_done,
				msecs_to_jiffies(1000));
	if (!timeout) {
		dbg("timeout waiting for DMA interrupt");
		return -ETIMEDOUT;
	}

	return 0;
}
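
/*
 * The completion above is only signalled from the interrupt handler,
 * either when both DMA phases have finished (dma_stage reaches 3) or on
 * a DMA error, so the single 1000 ms timeout covers the whole two-phase
 * transfer.
 */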

/*
 * Read/write one page using DMA. Only whole pages (512 bytes) can be
 * transferred this way.
 */
static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
{
	int bounce = 0;
	unsigned long flags;
	int error;

	dev->dma_error = 0;

	/* Set dma direction */
	dev->dma_dir = do_read;
	dev->dma_stage = 1;
	reinit_completion(&dev->dma_done);

	dbg_verbose("doing dma %s ", do_read ? "read" : "write");

	/* Set initial dma state: for reads, first fill the on-board buffer
	   from the device; for writes, first fill that buffer from memory */
	dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;

	/* if the incoming buffer is not page aligned, we should do bounce */
	if ((unsigned long)buf & (R852_DMA_LEN-1))
		bounce = 1;

	if (!bounce) {
		dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
			R852_DMA_LEN,
			do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
			bounce = 1;
	}

	if (bounce) {
		dbg_verbose("dma: using bounce buffer");
		dev->phys_dma_addr = dev->phys_bounce_buffer;
		if (!do_read)
			memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
	}

	/* Enable DMA */
	spin_lock_irqsave(&dev->irqlock, flags);
	r852_dma_enable(dev);
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* Wait till complete */
	error = r852_dma_wait(dev);

	if (error) {
		r852_dma_done(dev, error);
		return;
	}

	if (do_read && bounce)
		memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
}
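
/*
 * The bounce path above stages the transfer through dev->bounce_buffer
 * (dev->phys_bounce_buffer is its DMA address): for a write the data is
 * copied into it before the DMA starts, for a read it is copied out
 * after r852_dma_wait() succeeds. It is taken whenever the caller's
 * buffer is not 512-byte aligned - e.g. a buf at 0x...1234 fails the
 * check, since 0x1234 & (R852_DMA_LEN - 1) = 0x34 - or when mapping the
 * buffer for streaming DMA fails.
 */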

/*
 * Program data lines of the nand chip to send data to it
 */
static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	uint32_t reg;

	/* Don't allow any access to hardware if we suspect card removal */
	if (dev->card_unstable)
		return;

	/* Special case for whole sector write */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, (uint8_t *)buf, 0);
		return;
	}

	/* write DWORD chunks - faster */
	while (len >= 4) {
		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
		r852_write_reg_dword(dev, R852_DATALINE, reg);
		buf += 4;
		len -= 4;
	}

	/* write rest */
	while (len > 0) {
		r852_write_reg(dev, R852_DATALINE, *buf++);
		len--;
	}
}
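
/*
 * The dword fast path above packs four bytes LSB-first, e.g.
 * buf = {0xAA, 0xBB, 0xCC, 0xDD} yields reg = 0xDDCCBBAA. On a
 * little-endian host the cpu_to_le32() in r852_write_reg_dword() is a
 * no-op, so the bytes reach the data line in the same order as the
 * byte-at-a-time fallback.
 */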

/*
 * Read data lines of the nand chip to retrieve data
 */
static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	uint32_t reg;

	if (dev->card_unstable) {
		/* since we can't signal an error here, at least return
		   a predictable buffer */
		memset(buf, 0, len);
		return;
	}

	/* special case for whole sector read */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, buf, 1);
		return;
	}

	/* read in dword sized chunks */
	while (len >= 4) {
		reg = r852_read_reg_dword(dev, R852_DATALINE);
		*buf++ = reg & 0xFF;
		*buf++ = (reg >> 8) & 0xFF;
		*buf++ = (reg >> 16) & 0xFF;
		*buf++ = (reg >> 24) & 0xFF;
		len -= 4;
	}

	/* read the rest byte by byte */
	while (len--)
		*buf++ = r852_read_reg(dev, R852_DATALINE);
}

/*
 * Read one byte from nand chip
 */
static uint8_t r852_read_byte(struct nand_chip *chip)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	/* Same problem as in r852_read_buf.... */
	if (dev->card_unstable)
		return 0;

	return r852_read_reg(dev, R852_DATALINE);
}

/*
 * Control several chip lines & send commands
 */
static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return;

	if (ctrl & NAND_CTRL_CHANGE) {

		dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
				 R852_CTL_ON | R852_CTL_CARDENABLE);

		if (ctrl & NAND_ALE)
			dev->ctlreg |= R852_CTL_DATA;

		if (ctrl & NAND_CLE)
			dev->ctlreg |= R852_CTL_COMMAND;

		if (ctrl & NAND_NCE)
			dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
		else
			dev->ctlreg &= ~R852_CTL_WRITE;

		/* when a write is started, enable write access */
		if (dat == NAND_CMD_ERASE1)
			dev->ctlreg |= R852_CTL_WRITE;

		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	/* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we
	   still need to set write mode */
	if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
		dev->ctlreg |= R852_CTL_WRITE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	if (dat != NAND_CMD_NONE)
		r852_write_reg(dev, R852_DATALINE, dat);
}

/*
 * Wait till the card is ready.
 * Based on nand_wait, but also reports failure on DMA errors.
 */
static int r852_wait(struct nand_chip *chip)
{
	struct r852_device *dev = nand_get_controller_data(chip);

	unsigned long timeout;
	u8 status;

	timeout = jiffies + msecs_to_jiffies(400);

	while (time_before(jiffies, timeout))
		if (chip->legacy.dev_ready(chip))
			break;

	nand_status_op(chip, &status);

	/* Unfortunately, there is no way to report a detailed error status... */
	if (dev->dma_error) {
		status |= NAND_STATUS_FAIL;
		dev->dma_error = 0;
	}
	return status;
}

/*
 * Check if card is ready
 */

static int r852_ready(struct nand_chip *chip)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}


/*
 * Set ECC engine mode
 */

static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return;

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		/* enable ecc generation/check */
		dev->ctlreg |= R852_CTL_ECC_ENABLE;

		/* flush ecc buffer */
		r852_write_reg(dev, R852_CTL,
			dev->ctlreg | R852_CTL_ECC_ACCESS);

		r852_read_reg_dword(dev, R852_DATALINE);
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
		return;

	case NAND_ECC_READSYN:
		/* disable ecc generation */
		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}
}

/*
 * Calculate ECC, only used for writes
 */

static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
			      uint8_t *ecc_code)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	struct sm_oob *oob = (struct sm_oob *)ecc_code;
	uint32_t ecc1, ecc2;

	if (dev->card_unstable)
		return 0;

	dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);

	ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
	ecc2 = r852_read_reg_dword(dev, R852_DATALINE);

	oob->ecc1[0] = (ecc1) & 0xFF;
	oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
	oob->ecc1[2] = (ecc1 >> 16) & 0xFF;

	oob->ecc2[0] = (ecc2) & 0xFF;
	oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
	oob->ecc2[2] = (ecc2 >> 16) & 0xFF;

	r852_write_reg(dev, R852_CTL, dev->ctlreg);
	return 0;
}
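
/*
 * The ECC unit produces one 32-bit word per 256-byte half of the
 * 512-byte sector; only the low three bytes of each word are used, and
 * they are stored as the two 3-byte ECC fields of the SmartMedia OOB
 * layout (struct sm_oob).
 */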

/*
 * Correct the data using ECC; the HW did almost everything for us
 */

static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
			    uint8_t *read_ecc, uint8_t *calc_ecc)
{
	uint32_t ecc_reg;
	uint8_t ecc_status, err_byte;
	int i, error = 0;

	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return 0;

	if (dev->dma_error) {
		dev->dma_error = 0;
		return -EIO;
	}

	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
	r852_write_reg(dev, R852_CTL, dev->ctlreg);

	for (i = 0 ; i <= 1 ; i++) {

		ecc_status = (ecc_reg >> 8) & 0xFF;

		/* ecc uncorrectable error */
		if (ecc_status & R852_ECC_FAIL) {
			dbg("ecc: unrecoverable error, in half %d", i);
			error = -EBADMSG;
			goto exit;
		}

		/* correctable error */
		if (ecc_status & R852_ECC_CORRECTABLE) {

			err_byte = ecc_reg & 0xFF;
			dbg("ecc: recoverable error, in half %d, byte %d, bit %d",
				i, err_byte,
				ecc_status & R852_ECC_ERR_BIT_MSK);

			dat[err_byte] ^=
				1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
			error++;
		}

		dat += 256;
		ecc_reg >>= 16;
	}
exit:
	return error;
}
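
/*
 * Layout of ecc_reg as consumed above: the low 16 bits describe the
 * first 256-byte half and the high 16 bits the second one (hence the
 * ecc_reg >>= 16). Within each half, byte 0 is the offset of the
 * offending byte and byte 1 holds the status flags plus the number of
 * the flipped bit (R852_ECC_ERR_BIT_MSK).
 */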

/*
 * This is a copy of nand_read_oob_std;
 * nand_read_oob_syndrome assumes we can send a column address - we can't
 */
static int r852_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}

/*
 * Start the nand engine
 */

static void r852_engine_enable(struct r852_device *dev)
{
	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
	} else {
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
	}
	msleep(300);
	r852_write_reg(dev, R852_CTL, 0);
}
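
/*
 * The R852_HW_UNKNOWN bit decides the order of the two enable writes
 * above; why the order matters is not documented here - presumably a
 * quirk of different revisions of the controller.
 */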


/*
 * Stop the nand engine
 */

static void r852_engine_disable(struct r852_device *dev)
{
	r852_write_reg_dword(dev, R852_HW, 0);
	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}

/*
 * Test if card is present
 */

static void r852_card_update_present(struct r852_device *dev)
{
	unsigned long flags;
	uint8_t reg;

	spin_lock_irqsave(&dev->irqlock, flags);
	reg = r852_read_reg(dev, R852_CARD_STA);
	dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Update card detection IRQ state according to current card state,
 * which is read in r852_card_update_present
 */
static void r852_update_card_detect(struct r852_device *dev)
{
	int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	dev->card_unstable = 0;

	card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
	card_detect_reg |= R852_CARD_IRQ_GENABLE;

	card_detect_reg |= dev->card_detected ?
		R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;

	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}
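
/*
 * Only the edge that can actually happen next is armed above: with a
 * card present we enable the removal interrupt, without one the
 * insertion interrupt. Each change of card state therefore raises
 * exactly the interrupt we are waiting for.
 */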

static ssize_t r852_media_type_show(struct device *sys_dev,
			struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
	struct r852_device *dev = r852_get_dev(mtd);
	char *data = dev->sm ? "smartmedia" : "xd";

	strcpy(buf, data);
	return strlen(data);
}

static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);


/* Detect properties of card in slot */
static void r852_update_media_status(struct r852_device *dev)
{
	uint8_t reg;
	unsigned long flags;
	int readonly;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (!dev->card_detected) {
		message("card removed");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return;
	}

	readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
	reg = r852_read_reg(dev, R852_DMA_CAP);
	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);

	message("detected %s %s card in slot",
		dev->sm ? "SmartMedia" : "xD",
		readonly ? "readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Register the nand device
 * Called when the card is detected
 */
static int r852_register_nand_device(struct r852_device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(dev->chip);

	WARN_ON(dev->card_registered);

	mtd->dev.parent = &dev->pci_dev->dev;

	if (dev->readonly)
		dev->chip->options |= NAND_ROM;

	r852_engine_enable(dev);

	if (sm_register_device(mtd, dev->sm))
		goto error1;

	if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
		message("can't create media type sysfs attribute");
		goto error3;
	}

	dev->card_registered = 1;
	return 0;
error3:
	WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
	nand_cleanup(dev->chip);
error1:
	/* Force card redetect */
	dev->card_detected = 0;
	return -1;
}

/*
 * Unregister the card
 */

static void r852_unregister_nand_device(struct r852_device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(dev->chip);

	if (!dev->card_registered)
		return;

	device_remove_file(&mtd->dev, &dev_attr_media_type);
	WARN_ON(mtd_device_unregister(mtd));
	nand_cleanup(dev->chip);
	r852_engine_disable(dev);
	dev->card_registered = 0;
}

/* Card state updater */
static void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	r852_card_update_present(dev);
	r852_update_card_detect(dev);
	dev->card_unstable = 0;

	/* False alarm */
	if (dev->card_detected == dev->card_registered)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	r852_update_card_detect(dev);
}

/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
	uint8_t reg;

	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
					reg & ~R852_DMA_IRQ_MASK);

	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}

/* Interrupt handler */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts while we are waiting
		   for the card to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better than garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let the card state settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("received dma error IRQ");
			r852_dma_done(dev, -EIO);
			complete(&dev->dma_done);
			goto out;
		}

		/* received a DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of the dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done */
		if (dev->dma_stage == 3) {
			r852_dma_done(dev, 0);
			complete(&dev->dma_done);
		}
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static int r852_attach_chip(struct nand_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
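^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	/* configure the controller's on-host ECC engine for SmartMedia/xD */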
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	chip->ecc.size = R852_DMA_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	chip->ecc.bytes = SM_OOB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	chip->ecc.strength = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	chip->ecc.hwctl = r852_ecc_hwctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	chip->ecc.calculate = r852_ecc_calculate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	chip->ecc.correct = r852_ecc_correct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	/* TODO: hack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	chip->ecc.read_oob = r852_read_oob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) static const struct nand_controller_ops r852_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	.attach_chip = r852_attach_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	struct nand_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	struct r852_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* pci initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	error = pci_enable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	pci_set_master(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
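^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/* the controller can only address 32 bits of DMA address space */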
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	error = pci_request_regions(pci_dev, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		goto error3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/* init nand chip, but register it only on card insert */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (!chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		goto error4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	/* commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	chip->legacy.cmd_ctrl = r852_cmdctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	chip->legacy.waitfunc = r852_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	chip->legacy.dev_ready = r852_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/* I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	chip->legacy.read_byte = r852_read_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	chip->legacy.read_buf = r852_read_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	chip->legacy.write_buf = r852_write_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	/* init our device structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		goto error5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	nand_set_controller_data(chip, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	dev->chip = chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	dev->pci_dev = pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	pci_set_drvdata(pci_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	nand_controller_init(&dev->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	dev->controller.ops = &r852_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	chip->controller = &dev->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
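^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	/* coherent bounce buffer used for DMA transfers, one R852_DMA_LEN chunk */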
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		&dev->phys_bounce_buffer, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (!dev->bounce_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		goto error6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	dev->mmio = pci_ioremap_bar(pci_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (!dev->mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		goto error7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (!dev->tmp_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		goto error8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
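^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/* completed by the IRQ handler when a DMA transaction finishes */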
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	init_completion(&dev->dma_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (!dev->card_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		goto error9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	/* shutdown everything - precaution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	r852_engine_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	r852_disable_irqs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	r852_dma_test(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	dev->irq = pci_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	spin_lock_init(&dev->irqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	dev->card_detected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	r852_card_update_present(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/* register IRQ handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			  DRV_NAME, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		goto error10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	/* kick initial present test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	queue_delayed_work(dev->card_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		&dev->card_detect_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	pr_notice("driver loaded successfully\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) error10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	destroy_workqueue(dev->card_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) error9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	kfree(dev->tmp_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) error8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	pci_iounmap(pci_dev, dev->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) error7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			  dev->phys_bounce_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) error6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) error5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	kfree(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) error4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	pci_release_regions(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) error3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) error2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) error1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) static void r852_remove(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	struct r852_device *dev = pci_get_drvdata(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	/* Stop the card detect workqueue -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		we are going to unregister the device anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	cancel_delayed_work_sync(&dev->card_detect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	destroy_workqueue(dev->card_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	/* Unregister the device; this might cause more I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	r852_unregister_nand_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	/* Stop interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	r852_disable_irqs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	/* Cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	kfree(dev->tmp_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	pci_iounmap(pci_dev, dev->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			  dev->phys_bounce_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	kfree(dev->chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* Shutdown the PCI device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	pci_release_regions(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void r852_shutdown(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct r852_device *dev = pci_get_drvdata(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
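^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	/* quiesce the device: stop card detection, mask and drain interrupts */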
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	cancel_delayed_work_sync(&dev->card_detect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	r852_disable_irqs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	synchronize_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int r852_suspend(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct r852_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (dev->ctlreg & R852_CTL_CARDENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	/* First make sure the detect work is gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	cancel_delayed_work_sync(&dev->card_detect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	/* Turn off the interrupts and stop the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	r852_disable_irqs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	r852_engine_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	/* If the card was pulled out just during the suspend, which is very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		unlikely, we will remove it on resume; it is too late now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		anyway... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	dev->card_unstable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static int r852_resume(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct r852_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	r852_disable_irqs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	r852_card_update_present(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	r852_engine_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	/* If card status changed, just do the work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (dev->card_detected != dev->card_registered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		dbg("card was %s during low power state",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			dev->card_detected ? "added" : "removed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		queue_delayed_work(dev->card_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			&dev->card_detect_work, msecs_to_jiffies(1000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* Otherwise, initialize the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (dev->card_registered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		r852_engine_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		nand_select_target(dev->chip, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		nand_reset_op(dev->chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		nand_deselect_target(dev->chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	/* Program card detection IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	r852_update_card_detect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static const struct pci_device_id r852_pci_id_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	{ PCI_VDEVICE(RICOH, 0x0852), },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
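^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /* wires up suspend/resume only when CONFIG_PM_SLEEP is enabled */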
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static struct pci_driver r852_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	.name		= DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	.id_table	= r852_pci_id_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	.probe		= r852_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	.remove		= r852_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	.shutdown	= r852_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	.driver.pm	= &r852_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) module_pci_driver(r852_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");