Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

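The file below is this tree's copy of the MediaTek UART APDMA dmaengine driver (drivers/dma/mediatek/mtk-uart-apdma.c in the mainline layout).
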
// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* rx valid size >= vff thre */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* tx left size >= vff thre */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0
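
/*
 * Most VFF control registers use only bit 0, so the *_B values above
 * are per-register flags rather than fields of a single register:
 * VFF_EN_B is written to VFF_EN, VFF_STOP_B to VFF_STOP, and so on.
 * The RX interrupt enable/clear values cover two interrupt sources at
 * once, hence BIT(0) | BIT(1).
 */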

/*
 * Interrupt trigger level for TX: this driver sets the threshold to
 * the full VFF size n, in which case no polling is required to start
 * TX; with a smaller threshold, VFF_FLUSH would have to be polled.
 */
#define VFF_TX_THRE(n)		(n)
/* interrupt trigger level for rx */
#define VFF_RX_THRE(n)		((n) * 3 / 4)

#define VFF_RING_SIZE	0xffff
/* this bit is inverted each time the ring head wraps around */
#define VFF_RING_WRAP	0x10000

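/*
 * Read/write pointers are therefore 17 bits wide: bits [15:0] index
 * into the ring and bit 16 toggles on every wrap, which is how a full
 * ring (equal indices, differing wrap bits) is told apart from an
 * empty one (identical pointers).
 */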
#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54
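/* All of the above are offsets into each channel's own register window */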

struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config	cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	/* residue of the last RX transfer, reported via device_tx_status */
	unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
			       unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	/* VFF_LEN == 0 means first use: program ring address, size and threshold */
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	/* No space left in the ring: enable the interrupt and wait for room */
	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	/* At the end of the ring, zero the index and toggle the wrap bit */
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	/* Let DMA start moving data */
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

	/* HW auto set to 0 when left size >= threshold */
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

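/*
 * RX mirrors the TX setup, but the interrupt is armed up front: the
 * hardware fills the ring as data arrives from the UART and raises
 * the interrupt once the RX threshold (3/4 of the ring) is crossed.
 */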
static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
}

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/*
	 * The buffer is a ring: when the wrap bits of RPT and WPT differ,
	 * WPT has started the next lap, so add one full ring length to
	 * the raw index difference.
	 */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	/* cnt bytes were received; the rest is reported as residue */
	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);
}

static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	if (d) {
		list_del(&d->vd.node);
		vchan_cookie_complete(&d->vd);
		c->desc = NULL;
	}
}

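/*
 * One interrupt line per channel: dispatch on the channel's current
 * direction, then complete the in-flight descriptor.
 */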
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	mtk_uart_apdma_chan_complete_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_get_sync(mtkd->ddev.dev);
	if (ret < 0) {
		pm_runtime_put_noidle(chan->device->dev);
		return ret;
	}

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	/* wait for the warm reset to complete: VFF_EN must read back as 0 */
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
			  status, !status, 10, 100);
	if (ret)
		goto err_pm;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		ret = -EINVAL;
		goto err_pm;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

	return 0;

err_pm:
	/* balance the pm_runtime_get_sync() above on the error paths */
	pm_runtime_put_noidle(mtkd->ddev.dev);
	return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}

static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}

/*
 * Called through dmaengine_prep_slave_single(), so sglen is always 1:
 * the 8250 UART driver uses one ring buffer and hands over a single
 * scatterlist entry at a time.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sglen, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

	/* Now allocate and setup the descriptor */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

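/*
 * A minimal usage sketch, illustrative only and not part of this
 * driver: a UART driver obtains one channel per direction and drives
 * it through the generic dmaengine API. Names such as "rx", ring_size
 * and rx_dma_addr are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.src_port_window_size = ring_size,	// RX ring bytes
 *		.dst_port_window_size = ring_size,	// TX ring bytes
 *	};
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, rx_dma_addr, len,
 *					   DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
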
static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}

static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
			  status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	/*
	 * Stopping takes three steps:
	 * 1. set VFF_STOP to 1
	 * 2. wait for VFF_EN to read back 0
	 * 3. clear VFF_STOP back to 0
	 */
	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
			  status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	/*
	 * Wait out a concurrent handler only after dropping the lock:
	 * the IRQ handler takes vc.lock itself, so calling
	 * synchronize_irq() while holding it could deadlock.
	 */
	synchronize_irq(c->irq);

	return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

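/*
 * "mediatek,mt6577-uart-dma" is the generic compatible for this block;
 * device trees for later MediaTek SoCs typically list it as a fallback.
 */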
static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

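	/*
	 * One register window and one interrupt per virtual channel:
	 * platform resource i and IRQ i below belong to channel i.
	 */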
	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		c->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0)
			goto err_no_dma;
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}

static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */

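/*
 * System sleep only gates the clock when the device was not already
 * runtime-suspended; runtime PM takes and releases the clock around
 * channel use (see alloc/free_chan_resources).
 */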
static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove	= mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");