Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define QSPI_NUM_CS		2
#define QSPI_BYTES_PER_WORD	4

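/*
 * Register offsets and bit-field definitions for the QSPI controller's
 * memory-mapped register block.  Each *_MSK/*_SHFT pair describes a
 * multi-bit field; for example the SPI mode is programmed into MSTR_CONFIG
 * as (mode << SPI_MODE_SHFT) within SPI_MODE_MSK (see
 * qcom_qspi_prepare_message() below).
 */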
#define MSTR_CONFIG		0x0000
#define FULL_CYCLE_MODE		BIT(3)
#define FB_CLK_EN		BIT(4)
#define PIN_HOLDN		BIT(6)
#define PIN_WPN			BIT(7)
#define DMA_ENABLE		BIT(8)
#define BIG_ENDIAN_MODE		BIT(9)
#define SPI_MODE_MSK		0xc00
#define SPI_MODE_SHFT		10
#define CHIP_SELECT_NUM		BIT(12)
#define SBL_EN			BIT(13)
#define LPA_BASE_MSK		0x3c000
#define LPA_BASE_SHFT		14
#define TX_DATA_DELAY_MSK	0xc0000
#define TX_DATA_DELAY_SHFT	18
#define TX_CLK_DELAY_MSK	0x300000
#define TX_CLK_DELAY_SHFT	20
#define TX_CS_N_DELAY_MSK	0xc00000
#define TX_CS_N_DELAY_SHFT	22
#define TX_DATA_OE_DELAY_MSK	0x3000000
#define TX_DATA_OE_DELAY_SHFT	24

#define AHB_MASTER_CFG				0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK		0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT		0
#define HMEM_TYPE_LAST_TRANS_MSK		0x38
#define HMEM_TYPE_LAST_TRANS_SHFT		3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK	0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT	6
#define HMEMTYPE_READ_TRANS_MSK			0x700
#define HMEMTYPE_READ_TRANS_SHFT		8
#define HSHARED					BIT(11)
#define HINNERSHARED				BIT(12)

#define MSTR_INT_EN		0x000C
#define MSTR_INT_STATUS		0x0010
#define RESP_FIFO_UNDERRUN	BIT(0)
#define RESP_FIFO_NOT_EMPTY	BIT(1)
#define RESP_FIFO_RDY		BIT(2)
#define HRESP_FROM_NOC_ERR	BIT(3)
#define WR_FIFO_EMPTY		BIT(9)
#define WR_FIFO_FULL		BIT(10)
#define WR_FIFO_OVERRUN		BIT(11)
#define TRANSACTION_DONE	BIT(16)
#define QSPI_ERR_IRQS		(RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
				 WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS		(QSPI_ERR_IRQS | RESP_FIFO_RDY | \
				 WR_FIFO_EMPTY | WR_FIFO_FULL | \
				 TRANSACTION_DONE)

#define PIO_XFER_CTRL		0x0014
#define REQUEST_COUNT_MSK	0xffff

#define PIO_XFER_CFG		0x0018
#define TRANSFER_DIRECTION	BIT(0)
#define MULTI_IO_MODE_MSK	0xe
#define MULTI_IO_MODE_SHFT	1
#define TRANSFER_FRAGMENT	BIT(8)
#define SDR_1BIT		1
#define SDR_2BIT		2
#define SDR_4BIT		3
#define DDR_1BIT		5
#define DDR_2BIT		6
#define DDR_4BIT		7
#define DMA_DESC_SINGLE_SPI	1
#define DMA_DESC_DUAL_SPI	2
#define DMA_DESC_QUAD_SPI	3

#define PIO_XFER_STATUS		0x001c
#define WR_FIFO_BYTES_MSK	0xffff0000
#define WR_FIFO_BYTES_SHFT	16

#define PIO_DATAOUT_1B		0x0020
#define PIO_DATAOUT_4B		0x0024

#define RD_FIFO_CFG		0x0028
#define CONTINUOUS_MODE		BIT(0)

#define RD_FIFO_STATUS	0x002c
#define FIFO_EMPTY	BIT(11)
#define WR_CNTS_MSK	0x7f0
#define WR_CNTS_SHFT	4
#define RDY_64BYTE	BIT(3)
#define RDY_32BYTE	BIT(2)
#define RDY_16BYTE	BIT(1)
#define FIFO_RDY	BIT(0)

#define RD_FIFO_RESET		0x0030
#define RESET_FIFO		BIT(0)

#define CUR_MEM_ADDR		0x0048
#define HW_VERSION		0x004c
#define RD_FIFO			0x0050
#define SAMPLING_CLK_CFG	0x0090
#define SAMPLING_CLK_STATUS	0x0094

enum qspi_dir {
	QSPI_READ,
	QSPI_WRITE,
};

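/*
 * Book-keeping for the transfer currently in flight.  The controller is
 * half duplex, so a transfer is either a read or a write and tx_buf/rx_buf
 * can share storage.  rem_bytes is decremented from IRQ context as the
 * FIFOs are serviced.
 */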
struct qspi_xfer {
	union {
		const void *tx_buf;
		void *rx_buf;
	};
	unsigned int rem_bytes;
	unsigned int buswidth;
	enum qspi_dir dir;
	bool is_last;
};

enum qspi_clocks {
	QSPI_CLK_CORE,
	QSPI_CLK_IFACE,
	QSPI_NUM_CLKS
};

struct qcom_qspi {
	void __iomem *base;
	struct device *dev;
	struct clk_bulk_data *clks;
	struct qspi_xfer xfer;
	struct icc_path *icc_path_cpu_to_qspi;
	struct opp_table *opp_table;
	unsigned long last_speed;
	/* Lock to protect data accessed by IRQs */
	spinlock_t lock;
};

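/*
 * Translate an spi_transfer bus width (1, 2 or 4 data lines) into the
 * MULTI_IO_MODE field of PIO_XFER_CFG.  Only the SDR encodings are used;
 * an unexpected width falls back to single-wire mode with a one-time
 * warning.
 */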
static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
				   unsigned int buswidth)
{
	switch (buswidth) {
	case 1:
		return SDR_1BIT << MULTI_IO_MODE_SHFT;
	case 2:
		return SDR_2BIT << MULTI_IO_MODE_SHFT;
	case 4:
		return SDR_4BIT << MULTI_IO_MODE_SHFT;
	default:
		dev_warn_once(ctrl->dev,
				"Unexpected bus width: %u\n", buswidth);
		return SDR_1BIT << MULTI_IO_MODE_SHFT;
	}
}

static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_cfg;
	const struct qspi_xfer *xfer;

	xfer = &ctrl->xfer;
	pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
	pio_xfer_cfg &= ~TRANSFER_DIRECTION;
	pio_xfer_cfg |= xfer->dir;
	if (xfer->is_last)
		pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
	else
		pio_xfer_cfg |= TRANSFER_FRAGMENT;
	pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
	pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);

	writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}

static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_ctrl;

	pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
	pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
	pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
	writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}

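/*
 * Start a PIO transfer: program PIO_XFER_CFG for the current direction and
 * bus width, clear any stale interrupt status, enable the write-FIFO-empty
 * or response-FIFO-ready interrupt (plus the error interrupts), and finally
 * write the byte count to PIO_XFER_CTRL to kick the hardware off.
 */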
static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
	u32 ints;

	qcom_qspi_pio_xfer_cfg(ctrl);

	/* Ack any previous interrupts that might be hanging around */
	writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

	/* Setup new interrupts */
	if (ctrl->xfer.dir == QSPI_WRITE)
		ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
	else
		ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
	writel(ints, ctrl->base + MSTR_INT_EN);

	/* Kick off the transfer */
	qcom_qspi_pio_xfer_ctrl(ctrl);
}

static void qcom_qspi_handle_err(struct spi_master *master,
				 struct spi_message *msg)
{
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	writel(0, ctrl->base + MSTR_INT_EN);
	ctrl->xfer.rem_bytes = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
}

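/*
 * Update clocking for a new SPI clock rate.  With SBL_EN set the core clock
 * must run at 4x the transfer clock, so the OPP framework is asked for
 * speed_hz * 4; the CPU-to-QSPI interconnect bandwidth vote is scaled with
 * the transfer rate as well.  Calls with an unchanged rate are a no-op.
 */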
static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
	int ret;
	unsigned int avg_bw_cpu;

	if (speed_hz == ctrl->last_speed)
		return 0;

	/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
	if (ret) {
		dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
		return ret;
	}

	/*
	 * Set BW quota for CPU as driver supports FIFO mode only.
	 * We don't have explicit peak requirement so keep it equal to avg_bw.
	 */
	avg_bw_cpu = Bps_to_icc(speed_hz);
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ctrl->last_speed = speed_hz;

	return 0;
}

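/*
 * ->transfer_one() callback.  The per-transfer speed is applied, the
 * direction, bus width and buffer are latched under ctrl->lock, and the
 * PIO engine is started.  Returning 1 tells the SPI core that completion
 * is asynchronous; spi_finalize_current_transfer() is called from the IRQ
 * handler once rem_bytes reaches zero.
 */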
static int qcom_qspi_transfer_one(struct spi_master *master,
				  struct spi_device *slv,
				  struct spi_transfer *xfer)
{
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	int ret;
	unsigned long speed_hz;
	unsigned long flags;

	speed_hz = slv->max_speed_hz;
	if (xfer->speed_hz)
		speed_hz = xfer->speed_hz;

	ret = qcom_qspi_set_speed(ctrl, speed_hz);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctrl->lock, flags);

	/* We are half duplex, so either rx or tx will be set */
	if (xfer->rx_buf) {
		ctrl->xfer.dir = QSPI_READ;
		ctrl->xfer.buswidth = xfer->rx_nbits;
		ctrl->xfer.rx_buf = xfer->rx_buf;
	} else {
		ctrl->xfer.dir = QSPI_WRITE;
		ctrl->xfer.buswidth = xfer->tx_nbits;
		ctrl->xfer.tx_buf = xfer->tx_buf;
	}
	ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
					  &master->cur_msg->transfers);
	ctrl->xfer.rem_bytes = xfer->len;
	qcom_qspi_pio_xfer(ctrl);

	spin_unlock_irqrestore(&ctrl->lock, flags);

	/* We'll call spi_finalize_current_transfer() when done */
	return 1;
}

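/*
 * ->prepare_message() callback: program MSTR_CONFIG once per message with
 * the chip select, the SPI mode, the fixed feedback-clock/HOLD/WP/SBL/
 * full-cycle flags and the TX delays.  DMA_ENABLE is cleared because this
 * driver only supports FIFO (PIO) mode.
 */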
static int qcom_qspi_prepare_message(struct spi_master *master,
				     struct spi_message *message)
{
	u32 mstr_cfg;
	struct qcom_qspi *ctrl;
	int tx_data_oe_delay = 1;
	int tx_data_delay = 1;
	unsigned long flags;

	ctrl = spi_master_get_devdata(master);
	spin_lock_irqsave(&ctrl->lock, flags);

	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
	mstr_cfg &= ~CHIP_SELECT_NUM;
	if (message->spi->chip_select)
		mstr_cfg |= CHIP_SELECT_NUM;

	mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
	mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
	mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
	mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
	mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
	mstr_cfg &= ~DMA_ENABLE;

	writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return 0;
}

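/*
 * Drain the read FIFO from IRQ context.  Whole 32-bit words are copied
 * with ioread32_rep(); a trailing 1-3 bytes are unpacked from one final
 * FIFO word.  Returns IRQ_NONE when the FIFO signals no data (spurious
 * interrupt).
 */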
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
	u32 rd_fifo_status;
	u32 rd_fifo;
	unsigned int wr_cnts;
	unsigned int bytes_to_read;
	unsigned int words_to_read;
	u32 *word_buf;
	u8 *byte_buf;
	int i;

	rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);

	if (!(rd_fifo_status & FIFO_RDY)) {
		dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
		return IRQ_NONE;
	}

	wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
	wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

	words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
	bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

	if (words_to_read) {
		word_buf = ctrl->xfer.rx_buf;
		ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
		ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
		ctrl->xfer.rx_buf = word_buf + words_to_read;
	}

	if (bytes_to_read) {
		byte_buf = ctrl->xfer.rx_buf;
		rd_fifo = readl(ctrl->base + RD_FIFO);
		ctrl->xfer.rem_bytes -= bytes_to_read;
		for (i = 0; i < bytes_to_read; i++)
			*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
		ctrl->xfer.rx_buf = byte_buf;
	}

	return IRQ_HANDLED;
}

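/*
 * Fill the write FIFO from IRQ context.  Whole words go out through
 * PIO_DATAOUT_4B; once fewer than four bytes remain they are written one
 * at a time through PIO_DATAOUT_1B on a later WR_FIFO_EMPTY interrupt.
 */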
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
	const void *xfer_buf = ctrl->xfer.tx_buf;
	const int *word_buf;
	const char *byte_buf;
	unsigned int wr_fifo_bytes;
	unsigned int wr_fifo_words;
	unsigned int wr_size;
	unsigned int rem_words;

	wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
	wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

	if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
		/* Process the last 1-3 bytes */
		wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
		ctrl->xfer.rem_bytes -= wr_size;

		byte_buf = xfer_buf;
		while (wr_size--)
			writel(*byte_buf++,
			       ctrl->base + PIO_DATAOUT_1B);
		ctrl->xfer.tx_buf = byte_buf;
	} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if we don't have an even number of words.
		 */
		rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
		wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;

		wr_size = min(rem_words, wr_fifo_words);
		ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;

		word_buf = xfer_buf;
		iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
		ctrl->xfer.tx_buf = word_buf + wr_size;
	}

	return IRQ_HANDLED;
}

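/*
 * Interrupt handler: read and acknowledge MSTR_INT_STATUS, service the
 * FIFO for the current direction, log any error bits, and finalize the
 * transfer once no bytes remain.
 */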
static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
	u32 int_status;
	struct qcom_qspi *ctrl = dev_id;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&ctrl->lock);

	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);

	if (ctrl->xfer.dir == QSPI_WRITE) {
		if (int_status & WR_FIFO_EMPTY)
			ret = pio_write(ctrl);
	} else {
		if (int_status & RESP_FIFO_RDY)
			ret = pio_read(ctrl);
	}

	if (int_status & QSPI_ERR_IRQS) {
		if (int_status & RESP_FIFO_UNDERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
		if (int_status & WR_FIFO_OVERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
		if (int_status & HRESP_FROM_NOC_ERR)
			dev_err(ctrl->dev, "IRQ error: NOC response error\n");
		ret = IRQ_HANDLED;
	}

	if (!ctrl->xfer.rem_bytes) {
		writel(0, ctrl->base + MSTR_INT_EN);
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	spin_unlock(&ctrl->lock);
	return ret;
}

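/*
 * Probe: map the register block, look up the "core"/"iface" clocks and the
 * "qspi-config" interconnect path, place a small register-access bandwidth
 * vote (then disable the path until runtime resume), request the interrupt
 * and register the half-duplex controller.  The OPP table from the device
 * tree is optional.
 */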
static int qcom_qspi_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev;
	struct spi_master *master;
	struct qcom_qspi *ctrl;

	dev = &pdev->dev;

	master = devm_spi_alloc_master(dev, sizeof(*ctrl));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	ctrl = spi_master_get_devdata(master);

	spin_lock_init(&ctrl->lock);
	ctrl->dev = dev;
	ctrl->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
				  sizeof(*ctrl->clks), GFP_KERNEL);
	if (!ctrl->clks)
		return -ENOMEM;

	ctrl->clks[QSPI_CLK_CORE].id = "core";
	ctrl->clks[QSPI_CLK_IFACE].id = "iface";
	ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
	if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
		return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
				     "Failed to get cpu path\n");

	/* Set BW vote for register access */
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
				Bps_to_icc(1000));
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
				__func__, ret);
		return ret;
	}

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
				__func__, ret);
		return ret;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	ret = devm_request_irq(dev, ret, qcom_qspi_irq,
			IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
	if (ret) {
		dev_err(dev, "Failed to request irq %d\n", ret);
		return ret;
	}

	master->max_speed_hz = 300000000;
	master->num_chipselect = QSPI_NUM_CS;
	master->bus_num = -1;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_MODE_0 |
			    SPI_TX_DUAL | SPI_RX_DUAL |
			    SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->prepare_message = qcom_qspi_prepare_message;
	master->transfer_one = qcom_qspi_transfer_one;
	master->handle_err = qcom_qspi_handle_err;
	master->auto_runtime_pm = true;

	ctrl->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
	if (IS_ERR(ctrl->opp_table))
		return PTR_ERR(ctrl->opp_table);
	/* OPP table is optional */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		goto exit_probe_put_clkname;
	}

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 250);
	pm_runtime_enable(dev);

	ret = spi_register_master(master);
	if (!ret)
		return 0;

	pm_runtime_disable(dev);
	dev_pm_opp_of_remove_table(&pdev->dev);

exit_probe_put_clkname:
	dev_pm_opp_put_clkname(ctrl->opp_table);

	return ret;
}

static int qcom_qspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_master(master);

	pm_runtime_disable(&pdev->dev);
	dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(ctrl->opp_table);

	return 0;
}

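/*
 * Runtime PM: suspend drops the OPP (clock rate) vote, gates the clocks and
 * disables the interconnect path; resume reverses this and restores the
 * last programmed rate.  System sleep is routed through the same paths via
 * pm_runtime_force_suspend()/resume().
 */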
static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	return 0;
}

static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	int ret;

	ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}

static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_master_resume(master);

	return ret;
}

static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_master_resume(master);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
			   qcom_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};

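/*
 * Illustrative (not authoritative) device tree fragment.  Only the
 * compatible string, the "core"/"iface" clock names and the "qspi-config"
 * interconnect name are taken from this driver; the address, interrupt,
 * clock and interconnect specifiers below are placeholders.
 *
 *	qspi: spi@88dc000 {
 *		compatible = "qcom,qspi-v1";
 *		reg = <0x088dc000 0x600>;
 *		interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&gcc QSPI_IFACE_CLK>, <&gcc QSPI_CORE_CLK>;
 *		clock-names = "iface", "core";
 *		interconnects = <&soc_noc MASTER_CPU &soc_noc SLAVE_QSPI>;
 *		interconnect-names = "qspi-config";
 *	};
 */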
static const struct of_device_id qcom_qspi_dt_match[] = {
	{ .compatible = "qcom,qspi-v1", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);

static struct platform_driver qcom_qspi_driver = {
	.driver = {
		.name		= "qcom_qspi",
		.pm		= &qcom_qspi_dev_pm_ops,
		.of_match_table = qcom_qspi_dt_match,
	},
	.probe = qcom_qspi_probe,
	.remove = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);

MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");