Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types.  These are correct if MMC devices are
 * being used.  However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type.  We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
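 *
 * For example, the table below lists ctype 1 (read data) for CMD8, which
 * matches MMC's SEND_EXT_CSD.  SD's CMD8 (SEND_IF_COND) carries no data,
 * so the flags from the core give a desired ctype of 0 and the resulting
 * XOR of 1 corrects the controller's assumption.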
 */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};

static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
	struct cvm_mmc_cr_type *cr;
	u8 hardware_ctype, hardware_rtype;
	u8 desired_ctype = 0, desired_rtype = 0;
	struct cvm_mmc_cr_mods r;

	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
	hardware_ctype = cr->ctype;
	hardware_rtype = cr->rtype;
	if (cmd->opcode == MMC_GEN_CMD)
		hardware_ctype = (cmd->arg & 1) ? 1 : 2;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_ADTC:
		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
		break;
	case MMC_CMD_AC:
	case MMC_CMD_BC:
	case MMC_CMD_BCR:
		desired_ctype = 0;
		break;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		desired_rtype = 0;
		break;
	case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
	case MMC_RSP_R1B:
		desired_rtype = 1;
		break;
	case MMC_RSP_R2:
		desired_rtype = 2;
		break;
	case MMC_RSP_R3: /* MMC_RSP_R4 */
		desired_rtype = 3;
		break;
	}
	r.ctype_xor = desired_ctype ^ hardware_ctype;
	r.rtype_xor = desired_rtype ^ hardware_rtype;
	return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
	u64 bus_id_mask = GENMASK_ULL(61, 60);

	*reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;
	u64 rsp_sts;
	int bus_id;

	/*
	 * Mode settings are only taken from slot 0. Work around that
	 * hardware issue by first switching to slot 0.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* wait for the switch to finish */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
	u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch & match) != (new_val & match);
}

static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 timeout;

	if (!slot->clock)
		return;
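
	/*
	 * The watchdog counts card-clock cycles: clock (Hz) * timeout (s).
	 * With no explicit timeout, default to ~850 ms at the current clock.
	 */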
	if (ns)
		timeout = (slot->clock * ns) / NSEC_PER_SEC;
	else
		timeout = (slot->clock * 850ull) / 1000ull;
	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));
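
	/*
	 * Each BUF_DAT read returns eight data bytes, most-significant
	 * byte first; shift steps from 56 down to 0 to unpack them.
	 */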
	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
	req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));
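
	/*
	 * Types 1 and 3 are 48-bit response frames: the 32-bit payload sits
	 * in bits 39:8 of RSP_LO, with the CRC7 and end bit in the low byte.
	 * Type 2 is the 136-bit R2 response, split across RSP_HI and RSP_LO.
	 */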
	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}

static int get_dma_dir(struct mmc_data *data)
{
	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	else
		return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
		return -EILSEQ;
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;
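
	/*
	 * Re-arm the engine with DAT_NULL set, which (per the field name)
	 * terminates the pending transfer without moving any more data.
	 */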
	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;
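
		/*
		 * Bits 8:7 of RSP_STS give the finished command's data
		 * direction, mirroring the ctype encoding used above:
		 * 1 = read, 2 = write.
		 */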
		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR  ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
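	/* The SIZE field is expressed in 64-bit words, minus one. */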
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}

/*
 * Queue complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
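	/* The DMA FIFO holds at most 16 descriptors. */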
	if (count > 16)
		goto error;

	/* Enable FIFO by removing CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program DMA address */
		addr = sg_dma_address(sg);
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * If we have scatter-gather support we also have an extra
		 * register for the DMA addr, so no need to check
		 * host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

		/* enable interrupts on the last element */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/*
		 * The write copies the address and the command to the FIFO
		 * and increments the FIFO's COUNT field.
		 */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
		pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
	}

	/*
	 * Unlike prepare_dma_single, we don't return the address here,
	 * as it would not make sense for scatter-gather.  The DMA fixup
	 * is only required on models that don't support scatter-gather,
	 * so that is not a problem.
	 */
	return 1;

error:
	WARN_ON_ONCE(1);
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return prepare_dma_sg(host, data);
	else
		return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);
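
	/*
	 * Multi-block DMA relies on SET_BLOCK_COUNT (CMD23): MMC cards
	 * support it, SD cards advertise support in the SCR register.
	 */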
	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u  multi: %d\n",
		(emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}

static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev,
			"Error: cvm_mmc_dma_request no data\n");
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));
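
	/*
	 * Bytes are packed into each 64-bit BUF_DAT word most-significant
	 * byte first, mirroring the unpacking in do_read().
	 */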
	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests, the lock is released only from the completion
	 * interrupt.
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

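	/* Buffer-mode (non-DMA) transfer: stage the data and arm the watchdog. */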
	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else {
		set_wdog(slot, 0);
	}

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

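	/*
	 * Compose the command word: mark it valid, apply the command/response
	 * type fixups computed for non-MMC cards, and fill in the opcode and
	 * argument.
	 */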
	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
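	/*
	 * The offset is in 8-byte words from the end of the 64-word internal
	 * buffer: e.g. a single 64-byte block gives 64 - 64/8 = 56.
	 */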
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

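	/* Clear the status mask before issuing the command. */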
	writeq(0, host->base + MIO_EMM_STS_MASK(host));

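	/*
	 * Wait for any prior command, switch, or DMA operation to finish,
	 * polling for at most ~1 ms (100 iterations of 10 us).
	 */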
retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert the bus width to the hardware encoding */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* DDR is only available for 4- and 8-bit bus widths */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency, capped at the controller's 52 MHz maximum. */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

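	/*
	 * The output clock runs at sys_freq / (2 * clk_period). For example,
	 * assuming an 800 MHz sys_freq, a 52 MHz request yields
	 * clk_period = (800e6 + 52e6 - 1) / 104e6 = 8, i.e. an actual clock
	 * of 50 MHz.
	 */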
	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
	.request	= cvm_mmc_request,
	.set_ios	= cvm_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

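	/*
	 * Only clamp and cache the rate here; the divider itself is
	 * programmed separately through MIO_EMM_SWITCH.
	 */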
	clock = min(clock, mmc->f_max);
	clock = max(clock, mmc->f_min);
	slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set watchdog timeout value and default reset value
	 * for the mask register. Finally, set the CARD_RCA
	 * bit so that we can get the card address relative
	 * to the CMD register for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy Octeon firmware has no regulator entry; fall back to a
	 * hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set maximum and minimum frequency */
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
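	/* 400 kHz is the standard card-identification frequency. */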
	mmc->f_min = 400000;

	/* Sampling register settings, period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
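	/* Round each requested skew (in ps) to the nearest input-clock cycle. */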
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	/*
	 * We only have a 3.3V supply, so we cannot support any of the UHS
	 * modes. We do support the high-speed DDR modes up to 52 MHz.
	 *
	 * Disable bounce buffers for max_segs = 1.
	 */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

	if (host->use_sg)
		mmc->max_segs = 16;
	else
		mmc->max_segs = 1;

	/* The DMA size field can address up to 8 MB */
	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
				  dma_get_max_seg_size(host->dev));
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA works in 512-byte blocks */
	mmc->max_blk_size = 512;
	/* The DMA block count field is 15 bits wide */
	mmc->max_blk_count = 32767;

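	/*
	 * cached_rca mirrors the reset value written to MIO_EMM_RCA in
	 * cvm_mmc_init_lowlevel().
	 */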
	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

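	/* Register and initialize the new slot while holding the bus lock. */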
	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	mmc_remove_host(slot->mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(slot->mmc);
	return 0;
}