Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) // Mediatek SPI NOR controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) // Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/bits.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/spi/spi-mem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
#define DRIVER_NAME "mtk-spi-nor"

// Command register: setting a bit kicks off the corresponding operation;
// the hardware clears it again on completion (see mtk_nor_cmd_exec()).
#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
// "prg" sequence length budget in SPI clock cycles; callers divide by 8
// to get a byte budget (see mtk_nor_match_prg()).
#define MTK_NOR_PRG_CNT_MAX		56
#define MTK_NOR_REG_RDATA		0x0c

// Read-address bytes 0..2 sit in a contiguous register bank...
#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
// ...but byte 3 of a 4-byte address lives at a non-contiguous offset.
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

// TX byte slots for "prg" mode sequences (PRGDATA0..PRGDATA5).
#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

// RX byte slots for "prg" mode sequences (SHIFT0..SHIFT9).
#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

// Bus mode configuration for direct (mapped/DMA) reads.
#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724
// Upper 32 bits of the DMA addresses; only written when sp->high_dma is
// set (see mtk_nor_dma_exec()).
#define MTK_NOR_REG_DMA_DADR_HB		0x738
#define MTK_NOR_REG_DMA_END_DADR_HB	0x73c

#define MTK_NOR_PRG_MAX_SIZE		6
// Reading DMA src/dst addresses have to be 16-byte aligned
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128

// Convert a count of SPI clock cycles into microseconds (rounded up) at
// the controller's cached bus frequency.
#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
// Per-device driver state for one MediaTek SPI NOR controller.
struct mtk_nor {
	struct spi_controller *ctlr;	// SPI controller registered for this device
	struct device *dev;		// device used for DMA mapping and logging
	void __iomem *base;		// mapped controller register window
	u8 *buffer;			// bounce buffer for unaligned DMA read targets
	dma_addr_t buffer_dma;		// DMA address of the bounce buffer
	struct clk *spi_clk;		// SPI bus clock
	struct clk *ctlr_clk;		// controller core clock
	unsigned int spi_freq;		// cached SPI clock rate, feeds CLK_TO_US()
	bool wbuf_en;			// true while the write buffer (CFG2) is enabled
	bool has_irq;			// use the DMA-done interrupt instead of polling
	bool high_dma;			// controller has the high-address DMA registers
	struct completion op_done;	// waited on in mtk_nor_dma_exec(); presumably
					// completed by the IRQ handler — not visible here
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	u32 val = readl(sp->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	val &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	val |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	writel(val, sp->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
// Trigger @cmd in MTK_NOR_REG_CMD and busy-wait until the hardware clears
// the command bits again. @clk is the expected transfer length in SPI
// clock cycles; it only sizes the polling interval and timeout.
// Returns 0 on success or a negative errno on timeout.
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	// poll at roughly a third of the nominal transfer time, with a
	// 200x margin on the total timeout
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
// Load op->addr.val into the read-address registers. Bytes 0..2 go to
// the contiguous RADR0..2 bank; the 4th byte of a 4-byte address goes to
// the non-contiguous RADR3 register, and the controller's 4-byte address
// mode bit is toggled to match op->addr.nbytes.
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) static bool mtk_nor_match_read(const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	int dummy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	if (op->dummy.buswidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		if (op->addr.buswidth == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 			return dummy == 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		else if (op->addr.buswidth == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 			return dummy == 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		else if (op->addr.buswidth == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 			return dummy == 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		if (op->cmd.opcode == 0x03)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 			return dummy == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		else if (op->cmd.opcode == 0x0b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 			return dummy == 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
// Check whether @op can be executed in the controller's generic "prg"
// shift-register mode (single-bit bus only). The whole transaction —
// opcode, address, dummy and data bytes — must fit the fixed PRGDATA
// (tx) and SHIFT (rx) register banks, and the total on-wire length must
// stay within MTK_NOR_PRG_CNT_MAX clock cycles (i.e. /8 bytes).
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after it
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no addr, meaning adjust_op_size is impossible,
		// check data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		// the header alone may occupy the whole tx bank
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		// rx capacity is whatever wire budget remains after the
		// header and dummy bytes, capped by the SHIFT bank size
		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			// without an address, adjust_op_size can't shrink it
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		// no data phase: just the header has to fit on the wire
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	int tx_len, tx_left, prg_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	tx_len = op->cmd.nbytes + op->addr.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	if (op->data.dir == SPI_MEM_DATA_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		tx_len += op->dummy.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		if (op->data.nbytes > tx_left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 			op->data.nbytes = tx_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	} else if (op->data.dir == SPI_MEM_DATA_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 		if (op->data.nbytes > prg_left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 			op->data.nbytes = prg_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
// spi-mem adjust_op_size() hook: clamp op->data.nbytes so the op can be
// executed in one controller transaction (DMA read, bounce-buffered DMA
// read, single-byte PIO read, buffered page program, or prg mode).
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			// DMA needs a 16-byte-aligned flash address and at
			// least one full 16-byte chunk; otherwise fall back
			// to a single-byte PIO read
			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				// aligned destination: DMA directly, trim the
				// tail to a 16-byte multiple
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				// unaligned destination goes through the
				// bounce buffer, at most one buffer per op
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			// buffered page program moves exactly 128 bytes;
			// anything shorter is written one byte at a time
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static bool mtk_nor_supports_op(struct spi_mem *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 				const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	if (!spi_mem_default_supports_op(mem, op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	if (op->cmd.buswidth != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		switch(op->data.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 		case SPI_MEM_DATA_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 			if (mtk_nor_match_read(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 		case SPI_MEM_DATA_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 			if ((op->addr.buswidth == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 			    (op->dummy.nbytes == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 			    (op->data.buswidth == 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	return mtk_nor_match_prg(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
// Configure BUSCFG (and the CFG1 fast-read bit) for a direct read
// according to the op's bus widths, and latch the opcode into a PRGDATA
// slot for dual/quad reads.
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		// NOTE(review): PRGDATA4 appears to be where the hardware
		// fetches the quad-read opcode from — confirm against TRM
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		// likewise PRGDATA3 for the dual-read opcode
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		// single-bit read: 0x0b selects the fast-read sequence
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
// Start a DMA read of @length bytes from flash offset @from into
// @dma_addr and wait for completion, either via the DMA-done interrupt
// or by polling DMA_START until the hardware clears it. Callers
// guarantee the 16-byte alignment of @dma_addr and @length.
// Returns 0 on success or a negative errno on timeout.
static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	ulong delay;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	// controllers with >32-bit DMA reach take the upper address halves
	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	// arm the completion before unmasking the DMA interrupt so the
	// handler can't signal a stale completion
	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	// nominal transfer time in us, with a few bytes of header slack
	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

	if (sp->has_irq) {
		// NOTE(review): wait_for_completion_timeout() takes jiffies
		// while delay is in us; (delay + 1) * 100 overshoots the
		// intended timeout (harmless direction) — confirm units
		if (!wait_for_completion_timeout(&sp->op_done,
						 (delay + 1) * 100))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 (delay + 1) * 100);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	unsigned int rdlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 		rdlen = op->data.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 
// DMA read dispatcher: map and use the caller's buffer directly when it
// meets the 16-byte alignment requirement, otherwise go through the
// preallocated bounce buffer.
static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);

	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	// unmap on both success and failure paths
	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
// Single-byte PIO read fallback for transfers that can't use DMA
// (mtk_nor_adjust_op_size() clamps those ops to 1 byte). The flash
// address must already be programmed via mtk_nor_set_addr().
static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	// 6 bytes ≈ opcode + 4 address bytes + 1 data byte on the wire —
	// TODO confirm this cycle budget against the TRM
	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	if (sp->wbuf_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	val = readl(sp->base + MTK_NOR_REG_CFG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 				 val & MTK_NOR_WR_BUF_EN, 0, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		sp->wbuf_en = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	if (!sp->wbuf_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	val = readl(sp->base + MTK_NOR_REG_CFG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 				 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 		sp->wbuf_en = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	const u8 *buf = op->data.buf.out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	ret = mtk_nor_write_buffer_enable(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	for (i = 0; i < op->data.nbytes; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 		      buf[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 				(op->data.nbytes + 5) * BITS_PER_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 				 const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	const u8 *buf = op->data.buf.out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	ret = mtk_nor_write_buffer_disable(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	int rx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	int tx_len, prg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	void __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	u8 bufbyte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	tx_len = op->cmd.nbytes + op->addr.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	// count dummy bytes only if we need to write data after it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	if (op->data.dir == SPI_MEM_DATA_OUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		tx_len += op->dummy.nbytes + op->data.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	else if (op->data.dir == SPI_MEM_DATA_IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		rx_len = op->data.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		  op->data.nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	// an invalid op may reach here if the caller calls exec_op without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	// spi-mem won't try this op again with generic spi transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	// fill tx data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		writeb(bufbyte, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 		writeb(bufbyte, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	if (op->data.dir == SPI_MEM_DATA_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 			writeb(0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 			writeb(((const u8 *)(op->data.buf.out))[i], reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	for (; reg_offset >= 0; reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		writeb(0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	// trigger op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			       prg_len * BITS_PER_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	// fetch read data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	reg_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	if (op->data.dir == SPI_MEM_DATA_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 			((u8 *)(op->data.buf.in))[i] = readb(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	if ((op->data.nbytes == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		return mtk_nor_spi_mem_prg(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	if (op->data.dir == SPI_MEM_DATA_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		mtk_nor_set_addr(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		if (op->data.nbytes == MTK_NOR_PP_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 			return mtk_nor_pp_buffered(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		return mtk_nor_pp_unbuffered(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		ret = mtk_nor_write_buffer_disable(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		mtk_nor_setup_bus(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		if (op->data.nbytes == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 			mtk_nor_set_addr(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 			return mtk_nor_read_pio(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 			return mtk_nor_read_dma(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	return mtk_nor_spi_mem_prg(sp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static int mtk_nor_setup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 			sp->spi_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	spi->max_speed_hz = sp->spi_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) static int mtk_nor_transfer_one_message(struct spi_controller *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 					struct spi_message *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	struct mtk_nor *sp = spi_controller_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	struct spi_transfer *t = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	unsigned long trx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	int stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	void __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	const u8 *txbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	u8 *rxbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	list_for_each_entry(t, &m->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 		txbuf = t->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 		for (i = 0; i < t->len; i++, reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 			if (txbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 				writeb(txbuf[i], reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 				writeb(0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 		trx_len += t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 				trx_len * BITS_PER_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	if (stat < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		goto msg_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	reg_offset = trx_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	list_for_each_entry(t, &m->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 		rxbuf = t->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 		for (i = 0; i < t->len; i++, reg_offset--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 			if (rxbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 				rxbuf[i] = readb(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	m->actual_length = trx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) msg_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	m->status = stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	spi_finalize_current_message(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static void mtk_nor_disable_clk(struct mtk_nor *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	clk_disable_unprepare(sp->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	clk_disable_unprepare(sp->ctlr_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) static int mtk_nor_enable_clk(struct mtk_nor *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	ret = clk_prepare_enable(sp->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	ret = clk_prepare_enable(sp->ctlr_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 		clk_disable_unprepare(sp->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static void mtk_nor_init(struct mtk_nor *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	struct mtk_nor *sp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	u32 irq_status, irq_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	// write status back to clear interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	if (!(irq_status & irq_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	if (irq_status & MTK_NOR_IRQ_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 		complete(&sp->op_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static size_t mtk_max_msg_size(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	return MTK_NOR_PRG_MAX_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	.adjust_op_size = mtk_nor_adjust_op_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	.supports_op = mtk_nor_supports_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	.exec_op = mtk_nor_exec_op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) static const struct of_device_id mtk_nor_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	{ .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	{ .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	{ /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) MODULE_DEVICE_TABLE(of, mtk_nor_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static int mtk_nor_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	struct mtk_nor *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	struct clk *spi_clk, *ctlr_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	unsigned long dma_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 		return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	spi_clk = devm_clk_get(&pdev->dev, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	if (IS_ERR(spi_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 		return PTR_ERR(spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	if (IS_ERR(ctlr_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 		return PTR_ERR(ctlr_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	if (!ctlr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		dev_err(&pdev->dev, "failed to allocate spi controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	ctlr->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	ctlr->max_message_size = mtk_max_msg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	ctlr->mem_ops = &mtk_nor_mem_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	ctlr->num_chipselect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	ctlr->setup = mtk_nor_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	ctlr->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	dev_set_drvdata(&pdev->dev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	sp = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	sp->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	sp->has_irq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	sp->wbuf_en = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	sp->ctlr = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	sp->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	sp->spi_clk = spi_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	sp->ctlr_clk = ctlr_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	sp->high_dma = (dma_bits > 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	sp->buffer = dmam_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 				MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 				&sp->buffer_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	if (!sp->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	ret = mtk_nor_enable_clk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	sp->spi_freq = clk_get_rate(sp->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	mtk_nor_init(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	irq = platform_get_irq_optional(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 		dev_warn(sp->dev, "IRQ not available.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 				       pdev->name, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 			dev_warn(sp->dev, "failed to request IRQ.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 			init_completion(&sp->op_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 			sp->has_irq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	pm_runtime_get_noresume(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	ret = devm_spi_register_controller(&pdev->dev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 		goto err_probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	pm_runtime_mark_last_busy(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	pm_runtime_put_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) err_probe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 	pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	pm_runtime_dont_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	mtk_nor_disable_clk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static int mtk_nor_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	pm_runtime_dont_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 	mtk_nor_disable_clk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 	struct spi_controller *ctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	mtk_nor_disable_clk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 	struct spi_controller *ctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 	return mtk_nor_enable_clk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static int __maybe_unused mtk_nor_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	return pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static int __maybe_unused mtk_nor_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	return pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static const struct dev_pm_ops mtk_nor_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 			   mtk_nor_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static struct platform_driver mtk_nor_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 		.name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		.of_match_table = mtk_nor_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 		.pm = &mtk_nor_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	.probe = mtk_nor_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	.remove = mtk_nor_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) module_platform_driver(mtk_nor_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) MODULE_ALIAS("platform:" DRIVER_NAME);