Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Access SD/MMC cards through SPI master controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * (C) Copyright 2005, Intec Automation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *		Mike Lavender (mike@steroidmicros)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * (C) Copyright 2006-2007, David Brownell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * (C) Copyright 2007, Axis Communications,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *		Hans-Peter Nilsson (hp@axis.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * (C) Copyright 2007, ATRON electronic GmbH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *		Jan Nikitenko <jan.nikitenko@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/crc7.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/crc-itu-t.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/mmc/slot-gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/spi/mmc_spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) /* NOTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * - For now, we won't try to interoperate with a real mmc/sd/sdio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  *   controller, although some of them do have hardware support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  *   SPI protocol.  The main reason for such configs would be mmc-ish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  *   cards like DataFlash, which don't support that "native" protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  *   switch between driver stacks, and in any case if "native" mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  *   is available, it will be faster and hence preferable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * - MMC depends on a different chipselect management policy than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *   SPI interface currently supports for shared bus segments:  it needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  *   to issue multiple spi_message requests with the chipselect active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  *   using the results of one message to decide the next one to issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  *   Pending updates to the programming interface, this driver expects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  *   that it not share the bus with other drivers (precluding conflicts).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  * - We tell the controller to keep the chipselect active from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  *   beginning of an mmc_host_ops.request until the end.  So beware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  *   of SPI controller drivers that mis-handle the cs_change flag!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  *   However, many cards seem OK with chipselect flapping up/down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  *   during that time ... at least on unshared bus segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62)  * Local protocol constants, internal to data block protocols.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) /* Response tokens used to ack each block written: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) /* Read and write blocks start with these tokens and end with crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72)  * on error, read tokens act like a subset of R2_SPI_* values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) #define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) #define MMC_SPI_BLOCKSIZE	512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) #define MMC_SPI_R1B_TIMEOUT_MS	3000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) #define MMC_SPI_INIT_TIMEOUT_MS	3000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) /* One of the critical speed parameters is the amount of data which may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84)  * be transferred in one command. If this value is too low, the SD card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  * controller has to do multiple partial block writes (argggh!). With
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * today (2008) SD cards there is little speed gain if we transfer more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * than 64 KBytes at a time. So use this value until there is any indication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  * that we should do more here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) #define MMC_SPI_BLOCKSATONCE	128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) /****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95)  * Local Data Structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) /* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
	/* Bounce buffer holding the 7-byte command frame plus room for
	 * N(CR) latency bytes and the response; also reused for status
	 * and busy polling (see mmc_spi_readbytes/mmc_spi_skip).
	 * NOTE(review): the exact rationale for 29 bytes isn't visible
	 * in this chunk — presumably sized for the worst-case response
	 * path; confirm against the command-send code.
	 */
	u8			status[29];
	u8			data_token;	/* presumably one of SPI_TOKEN_* for writes — confirm */
	__be16			crc_val;	/* big-endian 16-bit CRC exchanged with data blocks */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
struct mmc_spi_host {
	struct mmc_host		*mmc;	/* MMC core host backed by this driver */
	struct spi_device	*spi;	/* SPI slave device used to reach the card */

	unsigned char		power_mode;	/* cached power state; updated elsewhere in this file */
	u16			powerup_msecs;	/* settle delay after power-up — presumably from pdata; confirm */

	struct mmc_spi_platform_data	*pdata;	/* optional board-specific configuration */

	/* for bulk data transfers */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* for status readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* underlying DMA-aware controller, or null */
	struct device		*dma_dev;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;
	dma_addr_t		data_dma;	/* DMA address of *data, valid only when dma_dev is set */

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones.
	 */
	void			*ones;
	dma_addr_t		ones_dma;	/* DMA address of the ones buffer, when dma_dev is set */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) /****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141)  * MMC-over-SPI protocol glue, used by the MMC stack interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
/*
 * Deassert the card's chipselect by re-running spi_setup(): per the
 * SPI core contract, chipselect is always inactive after setup().
 * Returns 0 or a negative errno from spi_setup().
 */
static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
/*
 * Read @len bytes from the card into host->data->status using the
 * pre-built "readback" message (presumably set up at probe time to
 * clock out all-ones — not visible in this chunk; confirm).
 *
 * @len must not exceed the scratch buffer; larger requests are
 * rejected with -EIO after a WARN.  Returns 0 or a negative errno
 * from the SPI transfer.
 */
static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	/* hand the scratch buffer to the device before the transfer... */
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	/* ... and back to the CPU afterwards, so we may read the bytes */
	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
/*
 * Poll the card, @n bytes per cycle, until it returns something other
 * than @byte or until @timeout (in jiffies) elapses.
 *
 * Returns the first non-matching byte (non-negative), a negative errno
 * from the SPI transfer, or -ETIMEDOUT.  Used both to wait out busy
 * signalling (skip 0x00) and to wait for a data token (skip 0xff).
 */
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;
	unsigned long start = jiffies;

	while (1) {
		int		status;
		unsigned	i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		/* deadline passed without seeing anything but @byte */
		if (time_is_before_jiffies(start + timeout))
			break;

		/* If we need long timeouts, we may release the CPU.
		 * We use jiffies here because we want to have a relation
		 * between elapsed time and the blocking of the scheduler.
		 */
		if (time_is_before_jiffies(start + 1))
			schedule();
	}
	return -ETIMEDOUT;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
/*
 * Wait for the card to release its busy signalling: while busy it
 * holds the data line low, so skip 0x00 bytes (a whole scratch
 * buffer's worth per poll cycle) until the deadline.
 */
static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
/*
 * Wait for a data token: the bus idles at all-ones, so skip 0xff
 * bytes one at a time until the card sends the token (or an error
 * token), or the deadline passes.
 */
static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * R2_SPI bits ... for SEND_STATUS, or after data read errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * newer cards R7 (IF_COND).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) static char *maptype(struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	switch (mmc_spi_resp_type(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	case MMC_RSP_SPI_R1:	return "R1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	case MMC_RSP_SPI_R1B:	return "R1B";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	case MMC_RSP_SPI_R2:	return "R2/R5";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	default:		return "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) /* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	unsigned long timeout_ms;
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	int	bitshift;
	u8 	leftover = 0;
	unsigned short rotator;
	int 	i;
	char	tag[32];

	snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * command and the first byte we read after it.  We ignore that
	 * first byte.  After STOP_TRANSMISSION command it may include
	 * two data bits, but otherwise it's all ones.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Data block reads (R1 response types) may need more data... */
	if (cp == end) {
		/* from here on, further bytes are fetched one at a time
		 * into the start of the scratch buffer
		 */
		cp = host->data->status;
		end = cp+1;

		/* Card sends N(CR) (== 1..8) bytes of all-ones then one
		 * status byte ... and we already scanned 2 bytes.
		 *
		 * REVISIT block read paths use nasty byte-at-a-time I/O
		 * so it can always DMA directly into the target buffer.
		 * It'd probably be better to memcpy() the first chunk and
		 * avoid extra i/o calls...
		 *
		 * Note we check for more than 8 bytes, because in practice,
		 * some SD cards are slow...
		 */
		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	/* A set MSB means the response start bit (0) arrived mid-byte;
	 * realign it by rotating across a 16-bit window.
	 */
	if (*cp & 0x80)	{
		/* Houston, we have an ugly card with a bit-shifted response */
		rotator = *cp++ << 8;
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		rotator |= *cp++;
		/* count leading ones to find the misalignment */
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;	/* low bits belong to the next response byte */
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response.  */
	if (cmd->resp[0] != 0) {
		/* map R1 error bits onto errno values */
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT; /* Bad address */
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS; /* Function not implemented */
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ; /* Illegal byte sequence */
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;    /* I/O error */
		/* else R1_SPI_IDLE, "it's resetting" */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
	 * and less-common stuff like various erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end) {
			timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
				MMC_SPI_R1B_TIMEOUT_MS;
			mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
		}
		break;

	/* SPI R2 == R1 + second status byte; SEND_STATUS
	 * SPI R5 == R1 + data byte; IO_RW_DIRECT
	 */
	case MMC_RSP_SPI_R2:
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		if (bitshift) {
			/* realign the second byte using the saved leftover bits */
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* read the next byte */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp+1;
			}
			if (bitshift) {
				/* keep realigning across the 16-bit rotator */
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) /* Issue command and read its response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423)  * Returns zero on success, negative for error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425)  * On error, caller must cope with mmc core retry mechanism.  That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426)  * means immediate low-level resubmit, which affects the bus lock...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) mmc_spi_command_send(struct mmc_spi_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		struct mmc_request *mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		struct mmc_command *cmd, int cs_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	struct scratch		*data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	u8			*cp = data->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	struct spi_transfer	*t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	/* We can handle most commands (except block reads) in one full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	 * duplex I/O operation before either starting the next transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	 * (data block or command) or else deselecting the card.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	 * First, write 7 bytes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	 *  - an all-ones byte to ensure the card is ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	 *  - opcode byte (plus start and transmission bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	 *  - four bytes of big-endian argument
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	 *  - crc7 (plus end bit) ... always computed, it's cheap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	 * We init the whole buffer to all-ones, which is what we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	 * to write while we're reading (later) response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	memset(cp, 0xff, sizeof(data->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	cp[1] = 0x40 | cmd->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	put_unaligned_be32(cmd->arg, cp + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	cp += 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	/* Then, read up to 13 bytes (while writing all-ones):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	 *  - N(CR) (== 1..8) bytes of all-ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	 *  - status byte (for all response types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	 *  - the rest of the response, either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	 *      + nothing, for R1 or R1B responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	 *	+ second status byte, for R2 responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	 *	+ four data bytes, for R3 and R7 responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	 * Finally, read some more bytes ... in the nice cases we know in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	 * advance how many, and reading 1 more is always OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	 * So in those cases one full duplex I/O of at most 21 bytes will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	 * handle the whole command, leaving the card ready to receive a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	 * data block or new command.  We do that whenever we can, shaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 * CPU and IRQ costs (especially when using DMA or FIFOs).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	 * There are two other cases, where it's not generally practical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	 * to rely on a single I/O:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	 *    In this case we can *try* to fit it into one I/O, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	 *    maybe read more data later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	 *  - Data block reads are more troublesome, since a variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	 *    number of padding bytes precede the token and data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	 *      + N(AC) (== 1..many) bytes of all-ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	 *    In this case we currently only have minimal speedups here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	 *    when N(CR) == 1 we can avoid I/O in response_get().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		cp += 2;	/* min(N(CR)) + status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		/* R1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			cp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 			cp += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			cp = data->status + sizeof(data->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		/* else:  R1 (most commands) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	dev_dbg(&host->spi->dev, "  mmc_spi: CMD%d, resp %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		cmd->opcode, maptype(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	/* send command, leaving chipselect active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	spi_message_init(&host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	t = &host->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	memset(t, 0, sizeof(*t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	t->tx_buf = t->rx_buf = data->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	t->tx_dma = t->rx_dma = host->data_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	t->len = cp - data->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	t->cs_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	spi_message_add_tail(t, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	if (host->dma_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		host->m.is_dma_mapped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		dma_sync_single_for_device(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 				host->data_dma, sizeof(*host->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	status = spi_sync_locked(host->spi, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		dma_sync_single_for_cpu(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 				host->data_dma, sizeof(*host->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		cmd->error = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	return mmc_spi_response_get(host, cmd, cs_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) /* Build data message with up to four separate transfers.  For TX, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  * start by writing the data token.  And in most cases, we finish with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545)  * a status transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547)  * We always provide TX data for data and CRC.  The MMC/SD protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548)  * requires us to write ones; but Linux defaults to writing zeroes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549)  * so we explicitly initialize it to all ones on RX paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551)  * We also handle DMA mapping, so the underlying SPI controller does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552)  * not need to (re)do it for each message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) mmc_spi_setup_data_message(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	struct mmc_spi_host	*host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	int			multiple,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	enum dma_data_direction	direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	struct spi_transfer	*t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	struct scratch		*scratch = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	dma_addr_t		dma = host->data_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	spi_message_init(&host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		host->m.is_dma_mapped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	/* for reads, readblock() skips 0xff bytes before finding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	 * the token; for writes, this transfer issues that token.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		t = &host->token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		memset(t, 0, sizeof(*t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		t->len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		if (multiple)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 			scratch->data_token = SPI_TOKEN_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		t->tx_buf = &scratch->data_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 			t->tx_dma = dma + offsetof(struct scratch, data_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		spi_message_add_tail(t, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	/* Body of transfer is buffer, then CRC ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 * either TX-only, or RX with TX-ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	t = &host->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	memset(t, 0, sizeof(*t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	t->tx_buf = host->ones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	t->tx_dma = host->ones_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	/* length and actual buffer info are written later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	spi_message_add_tail(t, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	t = &host->crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	memset(t, 0, sizeof(*t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	t->len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	if (direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		/* the actual CRC may get written later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		t->tx_buf = &scratch->crc_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			t->tx_dma = dma + offsetof(struct scratch, crc_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		t->tx_buf = host->ones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		t->tx_dma = host->ones_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		t->rx_buf = &scratch->crc_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			t->rx_dma = dma + offsetof(struct scratch, crc_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	spi_message_add_tail(t, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	 * A single block read is followed by N(EC) [0+] all-ones bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	 * before deselect ... don't bother.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	 * collect that single byte, so readblock() doesn't need to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	 * For a write, the one-byte data response follows immediately, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	 * Then single block reads may deselect, and multiblock ones issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	 * the next token (next data block, or STOP_TRAN).  We can try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	 * minimize I/O ops by using a single read to collect end-of-busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	if (multiple || direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		t = &host->early_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		memset(t, 0, sizeof(*t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		t->tx_buf = host->ones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		t->tx_dma = host->ones_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		t->rx_buf = scratch->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 			t->rx_dma = dma + offsetof(struct scratch, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		t->cs_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		spi_message_add_tail(t, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * Write one block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  *  - caller handled preceding N(WR) [1+] all-ones bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  *  - data block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  *	+ token
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  *	+ data bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  *	+ crc16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647)  *  - an all-ones byte ... card writes a data-response byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650)  * Return negative errno, else success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	struct spi_device	*spi = host->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	int			status, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	struct scratch		*scratch = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	u32			pattern;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (host->mmc->use_spi_crc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		dma_sync_single_for_device(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 				host->data_dma, sizeof(*scratch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	status = spi_sync_locked(spi, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		dev_dbg(&spi->dev, "write error (%d)\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		dma_sync_single_for_cpu(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 				host->data_dma, sizeof(*scratch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	 * Get the transmission data-response reply.  It must follow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	 * immediately after the data block we transferred.  This reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	 * doesn't necessarily tell whether the write operation succeeded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	 * it just says if the transmission was ok and whether *earlier*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	 * writes succeeded; see the standard.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	 * In practice, there are (even modern SDHC-)cards which are late
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	 * in sending the response, and miss the time frame by a few bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	 * so we have to cope with this situation and check the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	 * bit-by-bit. Arggh!!!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	pattern = get_unaligned_be32(scratch->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	/* First 3 bit of pattern are undefined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	pattern |= 0xE0000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* left-adjust to leading 0 bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	while (pattern & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		pattern <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	/* right-adjust for pattern matching. Code is in bit 4..0 now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	pattern >>= 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	switch (pattern) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	case SPI_RESPONSE_ACCEPTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	case SPI_RESPONSE_CRC_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		/* host shall then issue MMC_STOP_TRANSMISSION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		status = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	case SPI_RESPONSE_WRITE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		/* host shall then issue MMC_STOP_TRANSMISSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		 * and should MMC_SEND_STATUS to sort it out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		dev_dbg(&spi->dev, "write error %02x (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			scratch->status[0], status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	t->tx_buf += t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		t->tx_dma += t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/* Return when not busy.  If we didn't collect that status yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * we'll need some more I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	for (i = 4; i < sizeof(scratch->status); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		/* card is non-busy if the most recent bit is 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		if (scratch->status[i] & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	return mmc_spi_wait_unbusy(host, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * Read one block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  *  - skip leading all-ones bytes ... either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  *      + N(AC) [1..f(clock,CSD)] usually, else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  *      + N(CX) [0..8] when reading CSD or CID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  *  - data block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748)  *	+ token ... if error token, no data or crc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  *	+ data bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  *	+ crc16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * before dropping chipselect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * For multiblock reads, caller either reads the next block or issues a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * STOP_TRANSMISSION command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct spi_device	*spi = host->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	struct scratch		*scratch = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	unsigned int 		bitshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	u8			leftover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/* At least one SD card sends an all-zeroes byte when N(CX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	 * applies, before the all-ones bytes ... just cope with that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	status = mmc_spi_readbytes(host, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	status = scratch->status[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	if (status == 0xff || status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		status = mmc_spi_readtoken(host, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	/* The token may be bit-shifted...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	 * the first 0-bit precedes the data stream.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	bitshift = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	while (status & 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		status <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		bitshift--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	leftover = status << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (host->dma_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		dma_sync_single_for_device(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 				host->data_dma, sizeof(*scratch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		dma_sync_single_for_device(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				t->rx_dma, t->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	status = spi_sync_locked(spi, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		dev_dbg(&spi->dev, "read error %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (host->dma_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		dma_sync_single_for_cpu(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 				host->data_dma, sizeof(*scratch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		dma_sync_single_for_cpu(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				t->rx_dma, t->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (bitshift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		/* Walk through the data and the crc and do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		 * all the magic to get byte-aligned data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		u8 *cp = t->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		unsigned int bitright = 8 - bitshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		u8 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		for (len = t->len; len; len--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			temp = *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			*cp++ = leftover | (temp >> bitshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			leftover = temp << bitright;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		cp = (u8 *) &scratch->crc_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		temp = *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		*cp++ = leftover | (temp >> bitshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		leftover = temp << bitright;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		temp = *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		*cp = leftover | (temp >> bitshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (host->mmc->use_spi_crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		u16 crc = crc_itu_t(0, t->rx_buf, t->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		be16_to_cpus(&scratch->crc_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		if (scratch->crc_val != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			dev_dbg(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				scratch->crc_val, crc, t->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			return -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	t->rx_buf += t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		t->rx_dma += t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * An MMC/SD data stage includes one or more blocks, optional CRCs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * and inline handshaking.  That handhaking makes it unlike most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * other SPI protocol stacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		struct mmc_data *data, u32 blk_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	struct spi_device	*spi = host->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	struct device		*dma_dev = host->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	struct spi_transfer	*t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	enum dma_data_direction	direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct scatterlist	*sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	unsigned		n_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	int			multiple = (data->blocks > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	u32			clock_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	unsigned long		timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	direction = mmc_get_dma_dir(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	mmc_spi_setup_data_message(host, multiple, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	t = &host->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (t->speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		clock_rate = t->speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		clock_rate = spi->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	timeout = data->timeout_ns / 1000 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		  data->timeout_clks * 1000000 / clock_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	/* Handle scatterlist segments one at a time, with synch for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	 * each 512-byte block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		int			status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		dma_addr_t		dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		void			*kmap_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		unsigned		length = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		enum dma_data_direction	dir = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		/* set up dma mapping for controller drivers that might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		 * use DMA ... though they may fall back to PIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		if (dma_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			/* never invalidate whole *shared* pages ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			if ((sg->offset != 0 || length != PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 					&& dir == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				dir = DMA_BIDIRECTIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 						PAGE_SIZE, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			if (dma_mapping_error(dma_dev, dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 				data->error = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			if (direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				t->tx_dma = dma_addr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 				t->rx_dma = dma_addr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		/* allow pio too; we don't allow highmem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		kmap_addr = kmap(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			t->tx_buf = kmap_addr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			t->rx_buf = kmap_addr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		/* transfer each block, and update request status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		while (length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			t->len = min(length, blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			dev_dbg(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 				"    mmc_spi: %s block, %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				(direction == DMA_TO_DEVICE) ? "write" : "read",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 				t->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			if (direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				status = mmc_spi_writeblock(host, t, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				status = mmc_spi_readblock(host, t, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			data->bytes_xfered += t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			length -= t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			if (!multiple)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		/* discard mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		if (direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			flush_kernel_dcache_page(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		kunmap(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		if (dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			data->error = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			dev_dbg(&spi->dev, "%s status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				(direction == DMA_TO_DEVICE) ? "write" : "read",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	 * can be issued before multiblock writes.  Unlike its more widely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * that can affect the STOP_TRAN logic.   Complete (and current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * MMC specs should sort that out before Linux starts using CMD23.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (direction == DMA_TO_DEVICE && multiple) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		struct scratch	*scratch = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		int		tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		const unsigned	statlen = sizeof(scratch->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		dev_dbg(&spi->dev, "    mmc_spi: STOP_TRAN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		/* Tweak the per-block message we set up earlier by morphing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		 * it to hold single buffer with the token followed by some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		 * "not busy any longer" status, and leave chip selected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		INIT_LIST_HEAD(&host->m.transfers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		list_add(&host->early_status.transfer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				&host->m.transfers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		memset(scratch->status, 0xff, statlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		scratch->status[0] = SPI_TOKEN_STOP_TRAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		host->early_status.tx_buf = host->early_status.rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		host->early_status.tx_dma = host->early_status.rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		host->early_status.len = statlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			dma_sync_single_for_device(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 					host->data_dma, sizeof(*scratch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 					DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		tmp = spi_sync_locked(spi, &host->m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			dma_sync_single_for_cpu(host->dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 					host->data_dma, sizeof(*scratch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 					DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		if (tmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			if (!data->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 				data->error = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		/* Ideally we collected "not busy" status with one I/O,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		 * avoiding wasteful byte-at-a-time scanning... but more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		 * I/O is often needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		for (tmp = 2; tmp < statlen; tmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			if (scratch->status[tmp] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		tmp = mmc_spi_wait_unbusy(host, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (tmp < 0 && !data->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			data->error = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * MMC driver implementation -- the interface to the MMC stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	struct mmc_spi_host	*host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	int			status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	int			crc_retry = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	struct mmc_command	stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		struct mmc_command	*cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		int			invalid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		if (!mmc_spi_resp_type(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			dev_dbg(&host->spi->dev, "bogus command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			cmd->error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			invalid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		cmd = mrq->stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		if (cmd && !mmc_spi_resp_type(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			dev_dbg(&host->spi->dev, "bogus STOP command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			cmd->error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			invalid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		if (invalid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			mmc_request_done(host->mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* request exclusive bus access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	spi_bus_lock(host->spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) crc_recover:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	/* issue command; then optionally data and stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (status == 0 && mrq->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		 * The SPI bus is not always reliable for large data transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		 * If an occasional crc error is reported by the SD device with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		 * data read/write over SPI, it may be recovered by repeating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		 * the last SD command again. The retry count is set to 5 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		 * ensure the driver passes stress tests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		if (mrq->data->error == -EILSEQ && crc_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			stop.opcode = MMC_STOP_TRANSMISSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			stop.arg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			status = mmc_spi_command_send(host, mrq, &stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			crc_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			mrq->data->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			goto crc_recover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		if (mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			mmc_cs_off(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	/* release the bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	spi_bus_unlock(host->spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	mmc_request_done(host->mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * NOTE that here we can't know that the card has just been powered up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * not all MMC/SD sockets support power switching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * this doesn't seem to do the right thing at all...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static void mmc_spi_initsequence(struct mmc_spi_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	/* Try to be very sure any previous command has completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	 * wait till not-busy, skip debris from any old commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	mmc_spi_readbytes(host, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	 * Do a burst with chipselect active-high.  We need to do this to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	 * meet the requirement of 74 clock cycles with both chipselect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	 * and CMD (MOSI) high before CMD0 ... after the card has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	 * powered up to Vdd(min), and so is ready to take commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * Some cards are particularly needy of this (e.g. Viking "SD256")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 * while most others don't seem to care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	 * Note that this is one of the places MMC/SD plays games with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * SPI protocol.  Another is that when chipselect is released while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 * the card returns BUSY status, the clock must issue several cycles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	 * with chipselect high before the card will stop driving its output.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	 * SPI_CS_HIGH means "asserted" here. In some cases like when using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	 * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	 * inverted by gpiolib, so if we want to ascertain to drive it high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	 * we should toggle the default with an XOR as we do here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	host->spi->mode ^= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	if (spi_setup(host->spi) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		/* Just warn; most cards work without it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		dev_warn(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				"can't change chip-select polarity\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		host->spi->mode ^= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		mmc_spi_readbytes(host, 18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		host->spi->mode ^= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		if (spi_setup(host->spi) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			/* Wot, we can't get the same setup we had before? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 			dev_err(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 					"can't restore chip-select polarity\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static char *mmc_powerstring(u8 power_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	switch (power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	case MMC_POWER_OFF: return "off";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	case MMC_POWER_UP:  return "up";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	case MMC_POWER_ON:  return "on";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	return "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	struct mmc_spi_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (host->power_mode != ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		int		canpower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		canpower = host->pdata && host->pdata->setpower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 				mmc_powerstring(ios->power_mode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				ios->vdd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				canpower ? ", can switch" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		/* switch power on/off if possible, accounting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		 * max 250msec powerup time if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		if (canpower) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			case MMC_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 				host->pdata->setpower(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 						ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 				if (ios->power_mode == MMC_POWER_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 					msleep(host->powerup_msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (ios->power_mode == MMC_POWER_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			mmc_spi_initsequence(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		/* If powering down, ground all card inputs to avoid power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		 * delivery from data lines!  On a shared SPI bus, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		 * will probably be temporary; 6.4.2 of the simplified SD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		 * spec says this must last at least 1msec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		 *   - Clock low means CPOL 0, e.g. mode 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		 *   - MOSI low comes from writing zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		 *   - Chipselect is usually active low...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		if (canpower && ios->power_mode == MMC_POWER_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			int mres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			u8 nullbyte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			mres = spi_setup(host->spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			if (mres < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 				dev_dbg(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 					"switch to SPI mode 0 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			if (spi_write(host->spi, &nullbyte, 1) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 				dev_dbg(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 					"put spi signals to low failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			 * Now clock should be low due to spi mode 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			 * MOSI should be low because of written 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			 * chipselect should be low (it is active low)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			 * power supply is off, so now MMC is off too!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			 * FIXME no, chipselect can be high since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			 * device is inactive and SPI_CS_HIGH is clear...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			if (mres == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 				mres = spi_setup(host->spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 				if (mres < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 					dev_dbg(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 						"switch back to SPI mode 3 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		host->power_mode = ios->power_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		int		status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		host->spi->max_speed_hz = ios->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		status = spi_setup(host->spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		dev_dbg(&host->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			"mmc_spi:  clock to %d Hz, %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			host->spi->max_speed_hz, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static const struct mmc_host_ops mmc_spi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	.request	= mmc_spi_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	.set_ios	= mmc_spi_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	.get_ro		= mmc_gpio_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	.get_cd		= mmc_gpio_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * SPI driver implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) mmc_spi_detect_irq(int irq, void *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct mmc_spi_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) #ifdef CONFIG_HAS_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	struct spi_device *spi = host->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	if (!spi->master->dev.parent->dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	dev = spi->master->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 					DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (dma_mapping_error(dev, host->ones_dma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 					DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (dma_mapping_error(dev, host->data_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 				 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 				DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	host->dma_dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static void mmc_spi_dma_free(struct mmc_spi_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (!host->dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	dma_unmap_single(host->dma_dev, host->data_dma,	sizeof(*host->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			 DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static int mmc_spi_probe(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	void			*ones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	struct mmc_host		*mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	struct mmc_spi_host	*host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	bool			has_ro = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	/* We rely on full duplex transfers, mostly to reduce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	 * per-transfer overheads (by making fewer transfers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	/* MMC and SD specs only seem to care that sampling is on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	 * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	 * should be legit.  We'll use mode 0 since the steady state is 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	 * which is appropriate for hotplugging, unless the platform data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	 * specify mode 3 (if hardware is not compatible to mode 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (spi->mode != SPI_MODE_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		spi->mode = SPI_MODE_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	spi->bits_per_word = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	status = spi_setup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 				spi->mode, spi->max_speed_hz / 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	/* We need a supply of ones to transmit.  This is the only time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 * the CPU touches these, so cache coherency isn't a concern.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 * NOTE if many systems use more than one MMC-over-SPI connector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	 * it'd save some memory to share this.  That's evidently rare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	if (!ones)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	mmc->ops = &mmc_spi_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	mmc->max_segs = MMC_SPI_BLOCKSATONCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	mmc->caps = MMC_CAP_SPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	/* SPI doesn't need the lowspeed device identification thing for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	 * MMC or SD cards, since it never comes up in open drain mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	 * That's good; some SPI masters can't handle very low speeds!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 * However, low speed SDIO cards need not handle over 400 KHz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	 * that's the only reason not to use a few MHz for f_min (until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 * the upper layer reads the target frequency from the CSD).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	mmc->f_min = 400000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	mmc->f_max = spi->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	host->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	host->spi = spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	host->ones = ones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	/* Platform data is used to hook up things like card sensing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	 * and power switching gpios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	host->pdata = mmc_spi_get_pdata(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (host->pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		mmc->ocr_avail = host->pdata->ocr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (!mmc->ocr_avail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (host->pdata && host->pdata->setpower) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		host->powerup_msecs = host->pdata->powerup_msecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		if (!host->powerup_msecs || host->powerup_msecs > 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			host->powerup_msecs = 250;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	dev_set_drvdata(&spi->dev, mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	/* preallocate dma buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (!host->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		goto fail_nobuf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	status = mmc_spi_dma_alloc(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	/* setup message for status/busy readback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	spi_message_init(&host->readback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	host->readback.is_dma_mapped = (host->dma_dev != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	spi_message_add_tail(&host->status, &host->readback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	host->status.tx_buf = host->ones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	host->status.tx_dma = host->ones_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	host->status.rx_buf = &host->data->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	host->status.cs_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/* register card detect irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	if (host->pdata && host->pdata->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			goto fail_glue_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	/* pass platform capabilities, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (host->pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		mmc->caps |= host->pdata->caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		mmc->caps2 |= host->pdata->caps2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	status = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		goto fail_add_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	 * Index 0 is card detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	 * Old boardfiles were specifying 1 ms as debounce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (status == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		goto fail_add_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		 * The platform has a CD GPIO signal that may support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		 * if polling is needed or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		mmc_gpiod_request_cd_irq(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	mmc_detect_change(mmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	/* Index 1 is write protect/read only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (status == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		goto fail_add_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		has_ro = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			dev_name(&mmc->class_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			host->dma_dev ? "" : ", no DMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			has_ro ? "" : ", no WP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			(host->pdata && host->pdata->setpower)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 				? "" : ", no poweroff",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			(mmc->caps & MMC_CAP_NEEDS_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 				? ", cd polling" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) fail_add_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	mmc_remove_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) fail_glue_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	mmc_spi_dma_free(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) fail_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	kfree(host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) fail_nobuf1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	mmc_spi_put_pdata(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	kfree(ones);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static int mmc_spi_remove(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	struct mmc_spi_host	*host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	/* prevent new mmc_detect_change() calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	if (host->pdata && host->pdata->exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		host->pdata->exit(&spi->dev, mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	mmc_remove_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	mmc_spi_dma_free(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	kfree(host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	kfree(host->ones);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	spi->max_speed_hz = mmc->f_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	mmc_spi_put_pdata(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static const struct of_device_id mmc_spi_of_match_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	{ .compatible = "mmc-spi-slot", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static struct spi_driver mmc_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		.name =		"mmc_spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		.of_match_table = mmc_spi_of_match_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	.probe =	mmc_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	.remove =	mmc_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) module_spi_driver(mmc_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) MODULE_DESCRIPTION("SPI SD/MMC host driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) MODULE_ALIAS("spi:mmc_spi");