Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) // Copyright (C) 2017-2018 Socionext Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) //   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/mfd/tmio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/pinctrl/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "tmio_mmc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
/* extra bits in CTL_SD_CARD_CLK_CTL (UniPhier-specific extensions) */
#define   UNIPHIER_SD_CLK_CTL_DIV1024		BIT(16)
#define   UNIPHIER_SD_CLK_CTL_DIV1		BIT(10)
#define   UNIPHIER_SD_CLKCTL_OFFEN		BIT(9)  // auto SDCLK stop
/* register offsets below are UniPhier-specific additions to the TMIO map */
#define UNIPHIER_SD_CC_EXT_MODE		0x1b0
#define   UNIPHIER_SD_CC_EXT_MODE_DMA		BIT(1)
#define UNIPHIER_SD_HOST_MODE		0x1c8
#define UNIPHIER_SD_VOLT		0x1e4
#define   UNIPHIER_SD_VOLT_MASK			GENMASK(1, 0)
#define   UNIPHIER_SD_VOLT_OFF			0
#define   UNIPHIER_SD_VOLT_330			1	// 3.3V signal
#define   UNIPHIER_SD_VOLT_180			2	// 1.8V signal
/* built-in DMA engine registers */
#define UNIPHIER_SD_DMA_MODE		0x410
#define   UNIPHIER_SD_DMA_MODE_DIR_MASK		GENMASK(17, 16)
#define   UNIPHIER_SD_DMA_MODE_DIR_TO_DEV	0
#define   UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV	1
#define   UNIPHIER_SD_DMA_MODE_WIDTH_MASK	GENMASK(5, 4)
#define   UNIPHIER_SD_DMA_MODE_WIDTH_8		0
#define   UNIPHIER_SD_DMA_MODE_WIDTH_16		1
#define   UNIPHIER_SD_DMA_MODE_WIDTH_32		2
#define   UNIPHIER_SD_DMA_MODE_WIDTH_64		3
#define   UNIPHIER_SD_DMA_MODE_ADDR_INC		BIT(0)	// 1: inc, 0: fixed
#define UNIPHIER_SD_DMA_CTL		0x414
#define   UNIPHIER_SD_DMA_CTL_START	BIT(0)	// start DMA (auto cleared)
#define UNIPHIER_SD_DMA_RST		0x418
#define   UNIPHIER_SD_DMA_RST_CH1	BIT(9)
#define   UNIPHIER_SD_DMA_RST_CH0	BIT(8)
#define UNIPHIER_SD_DMA_ADDR_L		0x440
#define UNIPHIER_SD_DMA_ADDR_H		0x444

/*
 * IP is extended to support various features: built-in DMA engine,
 * 1/1024 divisor, etc.
 */
#define UNIPHIER_SD_CAP_EXTENDED_IP		BIT(0)
/* RX channel of the built-in DMA controller is broken (Pro5) */
#define UNIPHIER_SD_CAP_BROKEN_DMA_RX		BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
/* Driver-private data; wraps the tmio platform data so it can be recovered
 * with container_of() in uniphier_sd_priv(). */
struct uniphier_sd_priv {
	struct tmio_mmc_data tmio_data;	/* must stay embedded; see uniphier_sd_priv() */
	struct pinctrl *pinctrl;	/* presumably for signal-voltage pin states -- used outside this chunk */
	struct pinctrl_state *pinstate_uhs;	/* presumably 1.8V (UHS) pin state -- used outside this chunk */
	struct clk *clk;		/* host controller clock */
	struct reset_control *rst;	/* primary controller reset */
	struct reset_control *rst_br;	/* secondary reset, deasserted after @rst in clk_enable() */
	struct reset_control *rst_hw;	/* eMMC hardware reset line (uniphier_sd_hw_reset()) */
	struct dma_chan *chan;		/* external dmaengine channel, or NULL for PIO/built-in DMA */
	enum dma_data_direction dma_dir;	/* direction of the currently mapped transfer */
	unsigned long clk_rate;		/* cached rate of @clk after clk_enable() */
	unsigned long caps;		/* UNIPHIER_SD_CAP_* flags */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) static void *uniphier_sd_priv(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) /* external DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) static void uniphier_sd_external_dma_issue(unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	struct tmio_mmc_host *host = (void *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	uniphier_sd_dma_endisable(host, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	dma_async_issue_pending(priv->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
/*
 * Completion callback for the external dmaengine transfer: unmap the
 * scatterlist, then either arm the DATAEND interrupt (success) or finish
 * the request with an error.
 */
static void uniphier_sd_external_dma_callback(void *param,
					const struct dmaengine_result *result)
{
	struct tmio_mmc_host *host = param;
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long flags;

	/* priv->dma_dir was recorded when the buffer was mapped in _start() */
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);

	spin_lock_irqsave(&host->lock, flags);

	if (result->result == DMA_TRANS_NOERROR) {
		/*
		 * When the external DMA engine is enabled, strangely enough,
		 * the DATAEND flag can be asserted even if the DMA engine has
		 * not been kicked yet.  Enable the TMIO_STAT_DATAEND irq only
		 * after we make sure the DMA engine finishes the transfer,
		 * hence, in this callback.
		 */
		tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	} else {
		/* any dmaengine failure is reported to the core as a timeout */
		host->data->error = -ETIMEDOUT;
		tmio_mmc_do_data_irq(host);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
/*
 * Prepare and submit a transfer on the external dmaengine channel.  On any
 * failure (no channel, mapping failure, prep/submit failure) the function
 * falls back to PIO: host->dma_on stays false and the controller DMA bit
 * is cleared.  The actual kick happens later in the dma_issue tasklet.
 */
static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	enum dma_transfer_direction dma_tx_dir;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int sg_len;

	if (!priv->chan)
		goto force_pio;

	/* remember the map direction; the completion callback must unmap */
	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_tx_dir = DMA_DEV_TO_MEM;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_tx_dir = DMA_MEM_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
			    priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
				       dma_tx_dir, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_sg;

	desc->callback_result = uniphier_sd_external_dma_callback;
	desc->callback_param = host;

	cookie = dmaengine_submit(desc);
	if (cookie < 0)
		goto unmap_sg;

	host->dma_on = true;

	return;

unmap_sg:
	/* unmap with the original nents (host->sg_len), per the DMA API */
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
/*
 * Intentionally empty: the controller DMA bit is toggled per-transfer in
 * the issue tasklet and the dataend/abort handlers instead.
 */
static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
/*
 * Acquire the dmaengine channel at host setup time.  DMA is best-effort:
 * if no channel can be obtained (including -EPROBE_DEFER), the host simply
 * runs in PIO mode.
 */
static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct dma_chan *chan;

	chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
	if (IS_ERR(chan)) {
		dev_warn(mmc_dev(host->mmc),
			 "failed to request DMA channel. falling back to PIO\n");
		return;	/* just use PIO even for -EPROBE_DEFER */
	}

	/* this driver uses a single channel for both RX and TX */
	priv->chan = chan;
	host->chan_rx = chan;
	host->chan_tx = chan;

	tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
		     (unsigned long)host);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	if (priv->chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		dma_release_channel(priv->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	uniphier_sd_dma_endisable(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	if (priv->chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		dmaengine_terminate_sync(priv->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
/* DATAEND handler: transfer is over, so drop the DMA bit before completing. */
static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
{
	uniphier_sd_dma_endisable(host, 0);
	tmio_mmc_do_data_irq(host);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
/* tmio DMA callbacks for SoCs wired to a generic external dmaengine channel */
static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
	.start = uniphier_sd_external_dma_start,
	.enable = uniphier_sd_external_dma_enable,
	.request = uniphier_sd_external_dma_request,
	.release = uniphier_sd_external_dma_release,
	.abort = uniphier_sd_external_dma_abort,
	.dataend = uniphier_sd_external_dma_dataend,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
/*
 * Tasklet body: start a transfer on the built-in DMA engine.  The DATAEND
 * interrupt is armed (under the host lock) before the engine is kicked, so
 * the completion raised by the engine is not missed.
 */
static void uniphier_sd_internal_dma_issue(unsigned long arg)
{
	struct tmio_mmc_host *host = (void *)arg;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	spin_unlock_irqrestore(&host->lock, flags);

	/* enable the controller DMA bit, then pulse the start bit */
	uniphier_sd_dma_endisable(host, 1);
	writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
/*
 * Program the built-in DMA engine for a single-segment transfer.  When DMA
 * cannot be used for this request, silently fall back to PIO: host->dma_on
 * stays false and the controller DMA bit is cleared.
 */
static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct scatterlist *sg = host->sg_ptr;
	dma_addr_t dma_addr;
	unsigned int dma_mode_dir;
	u32 dma_mode;
	int sg_len;

	/* chan_rx is left NULL on Pro5, whose built-in DMA RX is broken */
	if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
		goto force_pio;

	/* the engine takes a single address/length pair: one segment only */
	if (WARN_ON(host->sg_len != 1))
		goto force_pio;

	/* 64-bit bus width (set below) needs an 8-byte aligned buffer */
	if (!IS_ALIGNED(sg->offset, 8))
		goto force_pio;

	/* remember the map direction; dataend must unmap with the same one */
	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
	dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
			       UNIPHIER_SD_DMA_MODE_WIDTH_64);
	dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;

	writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);

	/*
	 * NOTE(review): the address is read from data->sg while the mapping
	 * above used host->sg_ptr -- these appear to alias in the
	 * single-segment case; confirm against the tmio_mmc core.
	 */
	dma_addr = sg_dma_address(data->sg);
	writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
	writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);

	host->dma_on = true;

	return;
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
/*
 * Intentionally empty: the controller DMA bit is toggled per-transfer in
 * the issue tasklet and the dataend/abort handlers instead.
 */
static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
/*
 * "Request" the built-in DMA engine.  There is no dmaengine channel to
 * allocate; chan_rx/chan_tx get a dummy non-NULL value (0xdeadbeaf), which
 * apparently serves only as a DMA-capable flag for the tmio core and is
 * never dereferenced -- cleared again in the release hook below.
 */
static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	/*
	 * Due to a hardware bug, Pro5 cannot use DMA for RX.
	 * We can still use DMA for TX, but PIO for RX.
	 */
	if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
		host->chan_rx = (void *)0xdeadbeaf;

	host->chan_tx = (void *)0xdeadbeaf;

	tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
		     (unsigned long)host);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	/* Each value is set to zero to assume "disabling" each DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	host->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	host->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
/*
 * Abort an in-flight built-in DMA transfer by pulsing the per-channel bits
 * in the DMA_RST register: clear CH1/CH0, then set them back.
 * NOTE(review): the sequence implies 0 = held in reset, 1 = released --
 * confirm the polarity against the UniPhier SD controller datasheet.
 */
static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
{
	u32 tmp;

	uniphier_sd_dma_endisable(host, 0);

	tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
	tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);

	tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	uniphier_sd_dma_endisable(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	tmio_mmc_do_data_irq(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
/* tmio DMA callbacks for SoCs with the UniPhier built-in DMA engine */
static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
	.start = uniphier_sd_internal_dma_start,
	.enable = uniphier_sd_internal_dma_enable,
	.request = uniphier_sd_internal_dma_request,
	.release = uniphier_sd_internal_dma_release,
	.abort = uniphier_sd_internal_dma_abort,
	.dataend = uniphier_sd_internal_dma_dataend,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 
/*
 * Enable the host clock and bring the controller out of reset.
 *
 * Runs the clock as fast as the clk framework allows (ULONG_MAX rounds
 * down to the highest supported rate) and derives mmc->f_max / f_min from
 * the achieved rate.  Returns 0 on success or a negative errno; on failure
 * everything acquired so far is rolled back via the goto chain.
 */
static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct mmc_host *mmc = host->mmc;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* ULONG_MAX: ask for the maximum attainable clock rate */
	ret = clk_set_rate(priv->clk, ULONG_MAX);
	if (ret)
		goto disable_clk;

	priv->clk_rate = clk_get_rate(priv->clk);

	/*
	 * A DT "max-frequency" property has already populated mmc->f_max;
	 * only default to the full clock rate when it is unset.
	 */
	if (!mmc->f_max)
		mmc->f_max = priv->clk_rate;

	/*
	 * 1/512 is the finest divisor in the original IP.  Newer versions
	 * also supports 1/1024 divisor. (UniPhier-specific extension)
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		mmc->f_min = priv->clk_rate / 1024;
	else
		mmc->f_min = priv->clk_rate / 512;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto disable_clk;

	ret = reset_control_deassert(priv->rst_br);
	if (ret)
		goto assert_rst;

	return 0;

assert_rst:
	reset_control_assert(priv->rst);
disable_clk:
	clk_disable_unprepare(priv->clk);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	reset_control_assert(priv->rst_br);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	reset_control_assert(priv->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static void uniphier_sd_hw_reset(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	struct tmio_mmc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	reset_control_assert(priv->rst_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	/* For eMMC, minimum is 1us but give it 9us for good measure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	udelay(9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	reset_control_deassert(priv->rst_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	/* For eMMC, minimum is 200us but give it 300us for good measure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	usleep_range(300, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 				  unsigned int clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	unsigned long divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	/* stop the clock before changing its rate to avoid a glitch signal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	tmp &= ~CLK_CTL_SCLKEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	if (clock == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	tmp &= ~CLK_CTL_DIV_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	divisor = priv->clk_rate / clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	 * In the original IP, bit[7:0] represents the divisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	 * bit7 set: 1/512, ... bit0 set:1/4, all bits clear: 1/2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	 * The IP does not define a way to achieve 1/1.  For UniPhier variants,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	 * bit10 is used for 1/1.  Newer versions of UniPhier variants use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	 * bit16 for 1/1024.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	if (divisor <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 		tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		tmp |= roundup_pow_of_two(divisor) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	tmp |= CLK_CTL_SCLKEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static void uniphier_sd_host_init(struct tmio_mmc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	 * Connected to 32bit AXI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	 * This register holds settings for SoC-specific internal bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	 * connection.  What is worse, the register spec was changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	 * breaking the backward compatibility.  Write an appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	 * value depending on a flag associated with a compatible string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		val = 0x00000101;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		val = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	 * If supported, the controller can automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	 * enable/disable the clock line to the card.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 		val |= UNIPHIER_SD_CLKCTL_OFFEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 						   struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	struct tmio_mmc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	struct pinctrl_state *pinstate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	u32 val, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	switch (ios->signal_voltage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	case MMC_SIGNAL_VOLTAGE_330:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		val = UNIPHIER_SD_VOLT_330;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	case MMC_SIGNAL_VOLTAGE_180:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 		val = UNIPHIER_SD_VOLT_180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		pinstate = priv->pinstate_uhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	tmp &= ~UNIPHIER_SD_VOLT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	if (pinstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 		pinctrl_select_state(priv->pinctrl, pinstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		pinctrl_select_default_state(mmc_dev(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 				struct uniphier_sd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	if (IS_ERR(priv->pinctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 		return PTR_ERR(priv->pinctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	if (IS_ERR(priv->pinstate_uhs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		return PTR_ERR(priv->pinstate_uhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	host->ops.start_signal_voltage_switch =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 					uniphier_sd_start_signal_voltage_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) static int uniphier_sd_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	struct uniphier_sd_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	struct tmio_mmc_data *tmio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	struct tmio_mmc_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	priv->caps = (unsigned long)of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	priv->clk = devm_clk_get(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	if (IS_ERR(priv->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		dev_err(dev, "failed to get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		return PTR_ERR(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	priv->rst = devm_reset_control_get_shared(dev, "host");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	if (IS_ERR(priv->rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		dev_err(dev, "failed to get host reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		return PTR_ERR(priv->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	/* old version has one more reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		if (IS_ERR(priv->rst_br)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 			dev_err(dev, "failed to get bridge reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 			return PTR_ERR(priv->rst_br);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	tmio_data = &priv->tmio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	host = tmio_mmc_host_alloc(pdev, tmio_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	if (IS_ERR(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		return PTR_ERR(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	if (host->mmc->caps & MMC_CAP_HW_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		if (IS_ERR(priv->rst_hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 			dev_err(dev, "failed to get hw reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 			ret = PTR_ERR(priv->rst_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 			goto free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		host->ops.hw_reset = uniphier_sd_hw_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	if (host->mmc->caps & MMC_CAP_UHS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		ret = uniphier_sd_uhs_init(host, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 			dev_warn(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 				 "failed to setup UHS (error %d).  Disabling UHS.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 				 ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 			host->mmc->caps &= ~MMC_CAP_UHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		host->dma_ops = &uniphier_sd_internal_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		host->dma_ops = &uniphier_sd_external_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	host->bus_shift = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	host->clk_enable = uniphier_sd_clk_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	host->clk_disable = uniphier_sd_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	host->set_clock = uniphier_sd_set_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	ret = uniphier_sd_clk_enable(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		goto free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	uniphier_sd_host_init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	if (host->mmc->caps & MMC_CAP_UHS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		tmio_data->ocr_mask |= MMC_VDD_165_195;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	tmio_data->max_segs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	tmio_data->max_blk_count = U16_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	ret = tmio_mmc_host_probe(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 		goto disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 			       dev_name(dev), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		goto remove_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) remove_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	tmio_mmc_host_remove(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) disable_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	uniphier_sd_clk_disable(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) free_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	tmio_mmc_host_free(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 
/*
 * Unbind the device: deregister the MMC host, stop its clock, and free the
 * host structure allocated in probe.  Teardown mirrors the probe error
 * path, in the same order.
 */
static int uniphier_sd_remove(struct platform_device *pdev)
{
	struct tmio_mmc_host *host = platform_get_drvdata(pdev);

	tmio_mmc_host_remove(host);
	uniphier_sd_clk_disable(host);
	tmio_mmc_host_free(host);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 
static const struct of_device_id uniphier_sd_match[] = {
	{
		/* oldest supported IP revision: no capability flags */
		.compatible = "socionext,uniphier-sd-v2.91",
	},
	{
		/* extended IP, but RX via its DMA is flagged broken */
		.compatible = "socionext,uniphier-sd-v3.1",
		.data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP |
				 UNIPHIER_SD_CAP_BROKEN_DMA_RX),
	},
	{
		/* extended IP with working DMA in both directions */
		.compatible = "socionext,uniphier-sd-v3.1.1",
		.data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_sd_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 
/* Platform driver glue; devices are matched via uniphier_sd_match (OF). */
static struct platform_driver uniphier_sd_driver = {
	.probe = uniphier_sd_probe,
	.remove = uniphier_sd_remove,
	.driver = {
		.name = "uniphier-sd",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = uniphier_sd_match,
	},
};
module_platform_driver(uniphier_sd_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver");
MODULE_LICENSE("GPL v2");