Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #define QUP_CONFIG			0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #define QUP_STATE			0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #define QUP_IO_M_MODES			0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #define QUP_SW_RESET			0x000c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #define QUP_OPERATIONAL			0x0018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #define QUP_ERROR_FLAGS			0x001c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #define QUP_ERROR_FLAGS_EN		0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #define QUP_OPERATIONAL_MASK		0x0028
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #define QUP_HW_VERSION			0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #define QUP_MX_OUTPUT_CNT		0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #define QUP_OUTPUT_FIFO			0x0110
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #define QUP_MX_WRITE_CNT		0x0150
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #define QUP_MX_INPUT_CNT		0x0200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #define QUP_MX_READ_CNT			0x0208
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #define QUP_INPUT_FIFO			0x0218
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #define SPI_CONFIG			0x0300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define SPI_IO_CONTROL			0x0304
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #define SPI_ERROR_FLAGS			0x0308
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define SPI_ERROR_FLAGS_EN		0x030c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) /* QUP_CONFIG fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define QUP_CONFIG_SPI_MODE		(1 << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #define QUP_CONFIG_NO_INPUT		BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #define QUP_CONFIG_NO_OUTPUT		BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #define QUP_CONFIG_N			0x001f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) /* QUP_STATE fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #define QUP_STATE_VALID			BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define QUP_STATE_RESET			0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define QUP_STATE_RUN			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define QUP_STATE_PAUSE			3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define QUP_STATE_MASK			3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define QUP_STATE_CLEAR			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #define QUP_HW_VERSION_2_1_1		0x20010001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) /* QUP_IO_M_MODES fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #define QUP_IO_M_PACK_EN		BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define QUP_IO_M_UNPACK_EN		BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) #define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) #define QUP_IO_M_MODE_FIFO		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #define QUP_IO_M_MODE_BLOCK		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #define QUP_IO_M_MODE_DMOV		2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define QUP_IO_M_MODE_BAM		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) /* QUP_OPERATIONAL fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) #define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) #define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) #define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) #define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) #define QUP_OP_IN_SERVICE_FLAG		BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) #define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) #define QUP_OP_IN_FIFO_FULL		BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) #define QUP_OP_OUT_FIFO_FULL		BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) #define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) #define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) /* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) #define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) #define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) #define QUP_ERROR_INPUT_OVER_RUN	BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) /* SPI_CONFIG fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) #define SPI_CONFIG_HS_MODE		BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) #define SPI_CONFIG_INPUT_FIRST		BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) #define SPI_CONFIG_LOOPBACK		BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) /* SPI_IO_CONTROL fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) #define SPI_IO_C_FORCE_CS		BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) #define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) #define SPI_IO_C_MX_CS_MODE		BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) #define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) #define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) #define SPI_IO_C_CS_SELECT_MASK		0x000c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) #define SPI_IO_C_TRISTATE_CS		BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) #define SPI_IO_C_NO_TRI_STATE		BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) /* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) #define SPI_ERROR_CLK_OVER_RUN		BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) #define SPI_ERROR_CLK_UNDER_RUN		BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) #define SPI_NUM_CHIPSELECTS		4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) #define SPI_MAX_XFER			(SZ_64K - 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
/* high speed mode is when bus rate is greater than 26 MHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) #define SPI_HS_MIN_RATE			26000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) #define SPI_MAX_RATE			50000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) #define SPI_DELAY_THRESHOLD		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) #define SPI_DELAY_RETRY			10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
/* Per-controller driver state for one QUP (Qualcomm Universal Peripheral) SPI block. */
struct spi_qup {
	void __iomem		*base;	/* mapped QUP register window */
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;	/* NOTE(review): presumably serializes IRQ vs. transfer setup — confirm against handler */

	/* FIFO/block geometry, decoded from QUP_IO_M_MODES (see the *_SIZE macros) */
	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;	/* transfer currently being serviced */
	struct completion	done;	/* completed from spi_qup_dma_done() */
	int			error;	/* sticky error for the current transfer */
	int			w_size;	/* bytes per SPI word */
	int			n_words;	/* SPI words in the current transfer */
	int			tx_bytes;	/* bytes pushed to the output FIFO so far */
	int			rx_bytes;	/* bytes drained from the input FIFO so far */
	const u8		*tx_buf;
	u8			*rx_buf;
	int			qup_v1;	/* assumed: nonzero on QUP v1 hardware — TODO confirm */

	int			mode;	/* one of QUP_IO_M_MODE_* for the current transfer */
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	return (opflag & flag) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) static inline bool spi_qup_is_dma_xfer(int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) /* get's the transaction size length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static inline unsigned int spi_qup_len(struct spi_qup *controller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	return controller->n_words * controller->w_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	u32 opstate = readl_relaxed(controller->base + QUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	return opstate & QUP_STATE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) static int spi_qup_set_state(struct spi_qup *controller, u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	unsigned long loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	u32 cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	loop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	while (!spi_qup_is_valid_state(controller)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		if (++loop > SPI_DELAY_RETRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	if (loop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 			loop, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	cur_state = readl_relaxed(controller->base + QUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	 * of (b10) are required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	    (state == QUP_STATE_RESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		cur_state &= ~QUP_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		cur_state |= state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		writel_relaxed(cur_state, controller->base + QUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	loop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	while (!spi_qup_is_valid_state(controller)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		if (++loop > SPI_DELAY_RETRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	u8 *rx_buf = controller->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	int i, shift, num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	u32 word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	for (; num_words; num_words--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		num_bytes = min_t(int, spi_qup_len(controller) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 				       controller->rx_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 				       controller->w_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		if (!rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 			controller->rx_bytes += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 			 * The data format depends on bytes per SPI word:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 			 *  4 bytes: 0x12345678
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 			 *  2 bytes: 0x00001234
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 			 *  1 byte : 0x00000012
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 			shift = BITS_PER_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 			shift *= (controller->w_size - i - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 			rx_buf[controller->rx_bytes] = word >> shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
/*
 * Service an input interrupt in FIFO/block mode: acknowledge the IN
 * service flag and drain whatever data the hardware has made available.
 * On full completion, *opflags is refreshed from QUP_OPERATIONAL so the
 * caller can observe MAX_INPUT_DONE_FLAG.
 */
static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	/* words still expected for this transfer (rounded up to whole words) */
	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	/* block size is in bytes; FIFO words are 32 bit, hence >> 2 */
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			goto exit;

		if (is_block_mode) {
			/* read at most one block's worth of words per pass */
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			/* FIFO mode: stop as soon as the FIFO runs dry */
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
	 * reads, it has to be cleared again at the very end.  However, be sure
	 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
	 * present and this is used to determine if transaction is complete
	 */
exit:
	if (!remainder) {
		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
				       controller->base + QUP_OPERATIONAL);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	const u8 *tx_buf = controller->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	int i, num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	u32 word, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	for (; num_words; num_words--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		word = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		num_bytes = min_t(int, spi_qup_len(controller) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 				       controller->tx_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 				       controller->w_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 		if (tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 			for (i = 0; i < num_bytes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 				data = tx_buf[controller->tx_bytes + i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 				word |= data << (BITS_PER_BYTE * (3 - i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		controller->tx_bytes += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) static void spi_qup_dma_done(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	struct spi_qup *qup = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	complete(&qup->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
/*
 * Service an output interrupt in FIFO/block mode: acknowledge the OUT
 * service flag and feed the output FIFO with as much of the remaining
 * tx data as the hardware will currently accept.
 */
static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	/* words still to be sent for this transfer (rounded up) */
	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	/* block size is in bytes; FIFO words are 32 bit, hence >> 2 */
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		/* make sure the interrupt is valid */
		if (!remainder)
			return;

		if (is_block_mode) {
			/* write at most one block's worth of words per pass */
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			/* FIFO mode: stop when the FIFO fills up */
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 			   unsigned int nents, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 			   dma_async_tx_callback callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	struct spi_qup *qup = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	if (dir == DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		chan = master->dma_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		chan = master->dma_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	if (IS_ERR_OR_NULL(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		return desc ? PTR_ERR(desc) : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	desc->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	desc->callback_param = qup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	return dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
/*
 * Abort any in-flight DMA for @xfer, tearing down only the channels the
 * transfer actually used (tx first, then rx).
 */
static void spi_qup_dma_terminate(struct spi_master *master,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(master->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(master->dma_rx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 				     u32 *nents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	u32 total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	for (sg = sgl; sg; sg = sg_next(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		unsigned int len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		/* check for overflow as well as limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		if (((total + len) < total) || ((total + len) > max))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		total += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		(*nents)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	return total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 			  unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	struct spi_master *master = spi->master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	struct spi_qup *qup = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct scatterlist *tx_sgl, *rx_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (xfer->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		rx_done = spi_qup_dma_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	else if (xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		tx_done = spi_qup_dma_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	rx_sgl = xfer->rx_sg.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	tx_sgl = xfer->tx_sg.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		u32 rx_nents = 0, tx_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		if (rx_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 					SPI_MAX_XFER, &rx_nents) / qup->w_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		if (tx_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 					SPI_MAX_XFER, &tx_nents) / qup->w_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		if (!qup->n_words)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		ret = spi_qup_io_config(spi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		/* before issuing the descriptors, set the QUP to run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 			dev_warn(qup->dev, "cannot set RUN state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		if (rx_sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 					      DMA_DEV_TO_MEM, rx_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			dma_async_issue_pending(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		if (tx_sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 					      DMA_MEM_TO_DEV, tx_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			dma_async_issue_pending(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		if (!wait_for_completion_timeout(&qup->done, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	} while (rx_sgl || tx_sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 			  unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	struct spi_master *master = spi->master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	struct spi_qup *qup = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	int ret, n_words, iterations, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	n_words = qup->n_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	iterations = n_words / SPI_MAX_XFER; /* round down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	qup->rx_buf = xfer->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	qup->tx_buf = xfer->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		if (iterations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 			qup->n_words = SPI_MAX_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 			qup->n_words = n_words % SPI_MAX_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		if (qup->tx_buf && offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		if (qup->rx_buf && offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		 * if the transaction is small enough, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		 * to fallback to FIFO mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			qup->mode = QUP_IO_M_MODE_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		ret = spi_qup_io_config(spi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 			dev_warn(qup->dev, "cannot set RUN state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			dev_warn(qup->dev, "cannot set PAUSE state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		if (qup->mode == QUP_IO_M_MODE_FIFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			spi_qup_write(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 			dev_warn(qup->dev, "cannot set RUN state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		if (!wait_for_completion_timeout(&qup->done, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	} while (iterations--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) static bool spi_qup_data_pending(struct spi_qup *controller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	unsigned int remainder_tx, remainder_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 				    controller->tx_bytes, controller->w_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 				    controller->rx_bytes, controller->w_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	return remainder_tx || remainder_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	struct spi_qup *controller = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	u32 opflags, qup_err, spi_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (qup_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			dev_warn(controller->dev, "INPUT_OVER_RUN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (spi_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 			dev_warn(controller->dev, "CLK_OVER_RUN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	spin_lock_irqsave(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	if (!controller->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		controller->error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	spin_unlock_irqrestore(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	if (spi_qup_is_dma_xfer(controller->mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		if (opflags & QUP_OP_IN_SERVICE_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			spi_qup_read(controller, &opflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 			spi_qup_write(controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		if (!spi_qup_data_pending(controller))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			complete(&controller->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		complete(&controller->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		if (!spi_qup_is_dma_xfer(controller->mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			if (spi_qup_data_pending(controller))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 				return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		complete(&controller->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) /* set clock freq ... bits per word, determine mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	struct spi_qup *controller = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		dev_err(controller->dev, "too big size for loopback %d > %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			xfer->len, controller->in_fifo_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		dev_err(controller->dev, "fail to set frequency %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 			xfer->speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	controller->n_words = xfer->len / controller->w_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		controller->mode = QUP_IO_M_MODE_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	else if (spi->master->can_dma &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		 spi->master->can_dma(spi->master, spi, xfer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		 spi->master->cur_msg_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		controller->mode = QUP_IO_M_MODE_BAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		controller->mode = QUP_IO_M_MODE_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) /* prep qup for another spi transaction of specific type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	struct spi_qup *controller = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	u32 config, iomode, control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	spin_lock_irqsave(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	controller->xfer     = xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	controller->error    = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	controller->rx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	controller->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	spin_unlock_irqrestore(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		dev_err(controller->dev, "cannot set RESET state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	switch (controller->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	case QUP_IO_M_MODE_FIFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		writel_relaxed(controller->n_words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			       controller->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		writel_relaxed(controller->n_words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			       controller->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		/* must be zero for FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	case QUP_IO_M_MODE_BAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		writel_relaxed(controller->n_words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			       controller->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		writel_relaxed(controller->n_words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			       controller->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		/* must be zero for BLOCK and BAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		if (!controller->qup_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			void __iomem *input_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			input_cnt = controller->base + QUP_MX_INPUT_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			 * for DMA transfers, both QUP_MX_INPUT_CNT and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			 * QUP_MX_OUTPUT_CNT must be zero to all cases but one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			 * That case is a non-balanced transfer when there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			 * only a rx_buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			if (xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 				writel_relaxed(0, input_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 				writel_relaxed(controller->n_words, input_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	case QUP_IO_M_MODE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		reinit_completion(&controller->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		writel_relaxed(controller->n_words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			       controller->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		writel_relaxed(controller->n_words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			       controller->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		/* must be zero for BLOCK and BAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		dev_err(controller->dev, "unknown mode = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 				controller->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/* Set input and output transfer mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	if (!spi_qup_is_dma_xfer(controller->mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	control = readl_relaxed(controller->base + SPI_IO_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (spi->mode & SPI_CPOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		control |= SPI_IO_C_CLK_IDLE_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		control &= ~SPI_IO_C_CLK_IDLE_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	writel_relaxed(control, controller->base + SPI_IO_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	config = readl_relaxed(controller->base + SPI_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (spi->mode & SPI_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		config |= SPI_CONFIG_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		config &= ~SPI_CONFIG_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	if (spi->mode & SPI_CPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		config &= ~SPI_CONFIG_INPUT_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		config |= SPI_CONFIG_INPUT_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 * HS_MODE improves signal stability for spi-clk high rates,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * but is invalid in loop back mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		config |= SPI_CONFIG_HS_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		config &= ~SPI_CONFIG_HS_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	writel_relaxed(config, controller->base + SPI_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	config = readl_relaxed(controller->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	config |= xfer->bits_per_word - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	config |= QUP_CONFIG_SPI_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	if (spi_qup_is_dma_xfer(controller->mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (!xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			config |= QUP_CONFIG_NO_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		if (!xfer->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			config |= QUP_CONFIG_NO_INPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	writel_relaxed(config, controller->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	/* only write to OPERATIONAL_MASK when register is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (!controller->qup_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		u32 mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		 * status change in BAM mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		if (spi_qup_is_dma_xfer(controller->mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static int spi_qup_transfer_one(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			      struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			      struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct spi_qup *controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	unsigned long timeout, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	ret = spi_qup_io_prep(spi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 				     xfer->len) * 8, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	timeout = 100 * msecs_to_jiffies(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	reinit_completion(&controller->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	spin_lock_irqsave(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	controller->xfer     = xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	controller->error    = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	controller->rx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	controller->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	spin_unlock_irqrestore(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (spi_qup_is_dma_xfer(controller->mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		ret = spi_qup_do_dma(spi, xfer, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		ret = spi_qup_do_pio(spi, xfer, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	spi_qup_set_state(controller, QUP_STATE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	spin_lock_irqsave(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		ret = controller->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	spin_unlock_irqrestore(&controller->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (ret && spi_qup_is_dma_xfer(controller->mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		spi_qup_dma_terminate(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			    struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	struct spi_qup *qup = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	size_t dma_align = dma_get_cache_alignment();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	int n_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (xfer->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		    IS_ERR_OR_NULL(master->dma_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (xfer->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		    IS_ERR_OR_NULL(master->dma_tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) static void spi_qup_release_dma(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (!IS_ERR_OR_NULL(master->dma_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		dma_release_channel(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (!IS_ERR_OR_NULL(master->dma_tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		dma_release_channel(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	struct spi_qup *spi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct dma_slave_config *rx_conf = &spi->rx_conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				*tx_conf = &spi->tx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	struct device *dev = spi->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/* allocate dma resources, if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	master->dma_rx = dma_request_chan(dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (IS_ERR(master->dma_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		return PTR_ERR(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	master->dma_tx = dma_request_chan(dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	if (IS_ERR(master->dma_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		ret = PTR_ERR(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		goto err_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	/* set DMA parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	rx_conf->direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	rx_conf->device_fc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	rx_conf->src_addr = base + QUP_INPUT_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	rx_conf->src_maxburst = spi->in_blk_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	tx_conf->direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	tx_conf->device_fc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	tx_conf->dst_maxburst = spi->out_blk_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		dev_err(dev, "failed to configure RX channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		dev_err(dev, "failed to configure TX channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	dma_release_channel(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) err_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	dma_release_channel(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static void spi_qup_set_cs(struct spi_device *spi, bool val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	struct spi_qup *controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	u32 spi_ioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	u32 spi_ioc_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	controller = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	spi_ioc_orig = spi_ioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		spi_ioc |= SPI_IO_C_FORCE_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		spi_ioc &= ~SPI_IO_C_FORCE_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (spi_ioc != spi_ioc_orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) static int spi_qup_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct clk *iclk, *cclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct spi_qup *controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	u32 max_freq, iomode, num_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	int ret, irq, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	cclk = devm_clk_get(dev, "core");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (IS_ERR(cclk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		return PTR_ERR(cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	iclk = devm_clk_get(dev, "iface");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if (IS_ERR(iclk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		return PTR_ERR(iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* This is optional parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		max_freq = SPI_MAX_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (!max_freq || max_freq > SPI_MAX_RATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		dev_err(dev, "invalid clock frequency %d\n", max_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	ret = clk_prepare_enable(cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		dev_err(dev, "cannot enable core clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	ret = clk_prepare_enable(iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		clk_disable_unprepare(cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		dev_err(dev, "cannot enable iface clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	master = spi_alloc_master(dev, sizeof(struct spi_qup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (!master) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		clk_disable_unprepare(cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		clk_disable_unprepare(iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		dev_err(dev, "cannot allocate master\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	/* use num-cs unless not present or out of range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	    num_cs > SPI_NUM_CHIPSELECTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		master->num_chipselect = SPI_NUM_CHIPSELECTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		master->num_chipselect = num_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	master->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	master->max_speed_hz = max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	master->transfer_one = spi_qup_transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	master->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	master->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	master->dma_alignment = dma_get_cache_alignment();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	master->max_dma_len = SPI_MAX_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	platform_set_drvdata(pdev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	controller->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	controller->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	controller->iclk = iclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	controller->cclk = cclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	controller->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	ret = spi_qup_init_dma(master, res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	else if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		master->can_dma = spi_qup_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (!controller->qup_v1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		master->set_cs = spi_qup_set_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	spin_lock_init(&controller->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	init_completion(&controller->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	iomode = readl_relaxed(base + QUP_IO_M_MODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		controller->out_blk_sz = size * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		controller->out_blk_sz = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		controller->in_blk_sz = size * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		controller->in_blk_sz = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		 controller->in_blk_sz, controller->in_fifo_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 controller->out_blk_sz, controller->out_fifo_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	writel_relaxed(1, base + QUP_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		dev_err(dev, "cannot set RESET state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		goto error_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	writel_relaxed(0, base + QUP_OPERATIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	writel_relaxed(0, base + QUP_IO_M_MODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	if (!controller->qup_v1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		       base + SPI_ERROR_FLAGS_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/* if earlier version of the QUP, disable INPUT_OVERRUN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (controller->qup_v1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			base + QUP_ERROR_FLAGS_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	writel_relaxed(0, base + SPI_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			       IRQF_TRIGGER_HIGH, pdev->name, controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		goto error_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	pm_runtime_use_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	pm_runtime_set_active(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	ret = devm_spi_register_master(dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		goto disable_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) disable_pm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) error_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	spi_qup_release_dma(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	clk_disable_unprepare(cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	clk_disable_unprepare(iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	spi_master_put(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static int spi_qup_pm_suspend_runtime(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct spi_master *master = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct spi_qup *controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	u32 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	/* Enable clocks auto gaiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	config = readl(controller->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	writel_relaxed(config, controller->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	clk_disable_unprepare(controller->cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	clk_disable_unprepare(controller->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int spi_qup_pm_resume_runtime(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	struct spi_master *master = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct spi_qup *controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	u32 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	ret = clk_prepare_enable(controller->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	ret = clk_prepare_enable(controller->cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	/* Disable clocks auto gaiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	config = readl_relaxed(controller->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	writel_relaxed(config, controller->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static int spi_qup_suspend(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	struct spi_master *master = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct spi_qup *controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (pm_runtime_suspended(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		ret = spi_qup_pm_resume_runtime(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	ret = spi_master_suspend(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	clk_disable_unprepare(controller->cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	clk_disable_unprepare(controller->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int spi_qup_resume(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	struct spi_master *master = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	struct spi_qup *controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	ret = clk_prepare_enable(controller->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	ret = clk_prepare_enable(controller->cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	return spi_master_resume(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static int spi_qup_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct spi_master *master = dev_get_drvdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct spi_qup *controller = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	ret = pm_runtime_resume_and_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	spi_qup_release_dma(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	clk_disable_unprepare(controller->cclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	clk_disable_unprepare(controller->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static const struct of_device_id spi_qup_dt_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	{ .compatible = "qcom,spi-qup-v2.1.1", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	{ .compatible = "qcom,spi-qup-v2.2.1", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static const struct dev_pm_ops spi_qup_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			   spi_qup_pm_resume_runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			   NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) static struct platform_driver spi_qup_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		.name		= "spi_qup",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		.pm		= &spi_qup_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		.of_match_table = spi_qup_dt_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	.probe = spi_qup_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	.remove = spi_qup_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) module_platform_driver(spi_qup_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) MODULE_ALIAS("platform:spi_qup");