^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/mmc/mmc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/mmc/sd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mmc/sdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/pinctrl/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
/* Register offsets, in bytes from the controller base address */
#define USDHI6_SD_CMD 0x0000
#define USDHI6_SD_PORT_SEL 0x0004
#define USDHI6_SD_ARG 0x0008
#define USDHI6_SD_STOP 0x0010
#define USDHI6_SD_SECCNT 0x0014
#define USDHI6_SD_RSP10 0x0018
#define USDHI6_SD_RSP32 0x0020
#define USDHI6_SD_RSP54 0x0028
#define USDHI6_SD_RSP76 0x0030
#define USDHI6_SD_INFO1 0x0038
#define USDHI6_SD_INFO2 0x003c
#define USDHI6_SD_INFO1_MASK 0x0040
#define USDHI6_SD_INFO2_MASK 0x0044
#define USDHI6_SD_CLK_CTRL 0x0048
#define USDHI6_SD_SIZE 0x004c
#define USDHI6_SD_OPTION 0x0050
#define USDHI6_SD_ERR_STS1 0x0058
#define USDHI6_SD_ERR_STS2 0x005c
#define USDHI6_SD_BUF0 0x0060
#define USDHI6_SDIO_MODE 0x0068
#define USDHI6_SDIO_INFO1 0x006c
#define USDHI6_SDIO_INFO1_MASK 0x0070
#define USDHI6_CC_EXT_MODE 0x01b0
#define USDHI6_SOFT_RST 0x01c0
#define USDHI6_VERSION 0x01c4
#define USDHI6_HOST_MODE 0x01c8
#define USDHI6_SDIF_MODE 0x01cc

/* SD_CMD register bits: command flavour and expected response format */
#define USDHI6_SD_CMD_APP 0x0040
#define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000
#define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300
#define USDHI6_SD_CMD_MODE_RSP_R1 0x0400 /* Also R5, R6, R7 */
#define USDHI6_SD_CMD_MODE_RSP_R1B 0x0500 /* R1b */
#define USDHI6_SD_CMD_MODE_RSP_R2 0x0600
#define USDHI6_SD_CMD_MODE_RSP_R3 0x0700 /* Also R4 */
#define USDHI6_SD_CMD_DATA 0x0800
#define USDHI6_SD_CMD_READ 0x1000
#define USDHI6_SD_CMD_MULTI 0x2000
#define USDHI6_SD_CMD_CMD12_AUTO_OFF 0x4000

#define USDHI6_CC_EXT_MODE_SDRW BIT(1)

/* SD_INFO1 status / interrupt bits */
#define USDHI6_SD_INFO1_RSP_END BIT(0)
#define USDHI6_SD_INFO1_ACCESS_END BIT(2)
#define USDHI6_SD_INFO1_CARD_OUT BIT(3)
#define USDHI6_SD_INFO1_CARD_IN BIT(4)
#define USDHI6_SD_INFO1_CD BIT(5)
#define USDHI6_SD_INFO1_WP BIT(7)
#define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8)
#define USDHI6_SD_INFO1_D3_CARD_IN BIT(9)

/* SD_INFO2 status / interrupt bits */
#define USDHI6_SD_INFO2_CMD_ERR BIT(0)
#define USDHI6_SD_INFO2_CRC_ERR BIT(1)
#define USDHI6_SD_INFO2_END_ERR BIT(2)
#define USDHI6_SD_INFO2_TOUT BIT(3)
#define USDHI6_SD_INFO2_IWA_ERR BIT(4)
#define USDHI6_SD_INFO2_IRA_ERR BIT(5)
#define USDHI6_SD_INFO2_RSP_TOUT BIT(6)
#define USDHI6_SD_INFO2_SDDAT0 BIT(7)
#define USDHI6_SD_INFO2_BRE BIT(8)
#define USDHI6_SD_INFO2_BWE BIT(9)
#define USDHI6_SD_INFO2_SCLKDIVEN BIT(13)
#define USDHI6_SD_INFO2_CBSY BIT(14)
#define USDHI6_SD_INFO2_ILA BIT(15)

/* Composite card-detect masks: both CD and DAT3 based detection */
#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)

/* All SD_INFO2 error conditions */
#define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR | \
	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \
	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \
	USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \
	USDHI6_SD_INFO2_ILA)

/* All interrupt sources the driver ever unmasks, per register */
#define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
	USDHI6_SD_INFO1_CARD)

#define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
	USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_CLK_CTRL_SCLKEN BIT(8)

#define USDHI6_SD_STOP_STP BIT(0)
#define USDHI6_SD_STOP_SEC BIT(8)

#define USDHI6_SDIO_INFO1_IOIRQ BIT(0)
#define USDHI6_SDIO_INFO1_EXPUB52 BIT(14)
#define USDHI6_SDIO_INFO1_EXWT BIT(15)

#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13)

/* SOFT_RST: reserved bits must be written as 1, RESET is active-low */
#define USDHI6_SOFT_RST_RESERVED (BIT(1) | BIT(2))
#define USDHI6_SOFT_RST_RESET BIT(0)

#define USDHI6_SD_OPTION_TIMEOUT_SHIFT 4
#define USDHI6_SD_OPTION_TIMEOUT_MASK (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
#define USDHI6_SD_OPTION_WIDTH_1 BIT(15)

#define USDHI6_SD_PORT_SEL_PORTS_SHIFT 8

#define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff

#define USDHI6_SDIO_INFO1_IRQ (USDHI6_SDIO_INFO1_IOIRQ | 3 | \
	USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)

/* Transfers shorter than this are done in PIO, not DMA */
#define USDHI6_MIN_DMA 64

/* Software watchdog for a stuck request, in milliseconds */
#define USDHI6_REQ_TIMEOUT_MS 4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
/*
 * What the driver is currently waiting for: tracked in usdhi6_host::wait
 * and reported as "state" in error messages.
 */
enum usdhi6_wait_for {
	USDHI6_WAIT_FOR_REQUEST,
	USDHI6_WAIT_FOR_CMD,
	USDHI6_WAIT_FOR_MREAD,
	USDHI6_WAIT_FOR_MWRITE,
	USDHI6_WAIT_FOR_READ,
	USDHI6_WAIT_FOR_WRITE,
	USDHI6_WAIT_FOR_DATA_END,
	USDHI6_WAIT_FOR_STOP,
	USDHI6_WAIT_FOR_DMA,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
/* One page of an SG segment together with its kmap()ed virtual address */
struct usdhi6_page {
	struct page *page;
	void *mapped; /* mapped page */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
/* Per-controller driver instance state */
struct usdhi6_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* request currently being processed */
	void __iomem *base;		/* mapped controller registers */
	struct clk *clk;

	/* SG memory handling */

	/* Common for multiple and single block requests */
	struct usdhi6_page pg;	/* current page from an SG */
	void *blk_page;	/* either a mapped page, or the bounce buffer */
	size_t offset;	/* offset within a page, including sg->offset */

	/* Blocks, crossing a page boundary */
	size_t head_len;	/* bytes of such a block on its first page */
	struct usdhi6_page head_pg;	/* the first of the two pages it spans */

	/* A bounce buffer for unaligned blocks or blocks, crossing a page boundary */
	struct scatterlist bounce_sg;
	u8 bounce_buf[512];

	/* Multiple block requests only */
	struct scatterlist *sg; /* current SG segment */
	int page_idx;		/* page index within an SG segment */

	enum usdhi6_wait_for wait;	/* current request-processing state */
	u32 status_mask;	/* value last written to SD_INFO1_MASK */
	u32 status2_mask;	/* value last written to SD_INFO2_MASK */
	u32 sdio_mask;
	u32 io_error;	/* SD_INFO2 error bits, consumed by usdhi6_error_code() */
	u32 irq_status;
	unsigned long imclk;
	unsigned long rate;
	bool app_cmd;	/* NOTE(review): presumably "next command is an ACMD" — confirm against users */

	/* Timeout handling */
	struct delayed_work timeout_work;
	unsigned long timeout;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	bool dma_active;

	/* Pin control */
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_uhs;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /* I/O primitives */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) iowrite32(data, host->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) host->base, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) iowrite16(data, host->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) host->base, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) u32 data = ioread32(host->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) host->base, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) u16 data = ioread16(host->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) host->base, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static void usdhi6_wait_for_resp(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) USDHI6_SD_INFO2_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static void usdhi6_only_cd(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /* Mask all except card hotplug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
/* Mask all interrupt sources */
static void usdhi6_mask_all(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, 0, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) static int usdhi6_error_code(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) u32 err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (host->io_error &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) int opc = host->mrq ? host->mrq->cmd->opcode : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) /* Response timeout is often normal, don't spam the log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (host->wait == USDHI6_WAIT_FOR_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) dev_dbg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) err, rsp54, host->wait, opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) dev_warn(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) err, rsp54, host->wait, opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) if (host->io_error & USDHI6_SD_INFO2_ILA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) /* Scatter-Gather management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * In PIO mode we have to map each page separately, using kmap(). That way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * have been observed with an SDIO WiFi card (b43 driver).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) static void usdhi6_blk_bounce(struct usdhi6_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct scatterlist *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) size_t blk_head = host->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) __func__, host->mrq->cmd->opcode, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) data->blksz, data->blocks, sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) host->head_pg.page = host->pg.page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) host->head_pg.mapped = host->pg.mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) host->pg.page = nth_page(host->pg.page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) host->pg.mapped = kmap(host->pg.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) host->blk_page = host->bounce_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) host->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) blk_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) memcpy(host->bounce_buf + blk_head, host->pg.mapped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) data->blksz - blk_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) /* Only called for multiple block IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static void usdhi6_sg_prep(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) host->sg = data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /* TODO: if we always map, this is redundant */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) host->offset = host->sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /* Map the first page in an SG segment: common for multiple and single block IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static void *usdhi6_sg_map(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) size_t head = PAGE_SIZE - sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) size_t blk_head = head % data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (WARN(sg_dma_len(sg) % data->blksz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) "SG size %u isn't a multiple of block size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) sg_dma_len(sg), data->blksz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) host->pg.page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) host->pg.mapped = kmap(host->pg.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) host->offset = sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * Block size must be a power of 2 for multi-block transfers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * therefore blk_head is equal for all pages in this SG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) host->head_len = blk_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (head < data->blksz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * The first block in the SG crosses a page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * Max blksz = 512, so blocks can only span 2 pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) usdhi6_blk_bounce(host, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) host->blk_page = host->pg.mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) sg->offset, host->mrq->cmd->opcode, host->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return host->blk_page + host->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
/*
 * Unmap the current page: common for multiple and single block IO.
 * @force: also unmap the current page even if more blocks remain in the
 *         same SG segment (used when tearing the transfer down).
 */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
	struct mmc_data *data = host->mrq->data;
	struct page *page = host->head_pg.page;

	if (page) {
		/* Previous block was cross-page boundary */
		struct scatterlist *sg = data->sg_len > 1 ?
			host->sg : data->sg;
		size_t blk_head = host->head_len;

		/*
		 * A successful read went through the bounce buffer: copy its
		 * head back to the tail of the previous page and its tail to
		 * the start of the current page.
		 */
		if (!data->error && data->flags & MMC_DATA_READ) {
			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
			       host->bounce_buf, blk_head);
			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
			       data->blksz - blk_head);
		}

		flush_dcache_page(page);
		kunmap(page);

		host->head_pg.page = NULL;

		if (!force && sg_dma_len(sg) + sg->offset >
		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
			/* More blocks in this SG, don't unmap the next page */
			return;
	}

	/* Unmap the current page, if one is mapped */
	page = host->pg.page;
	if (!page)
		return;

	flush_dcache_page(page);
	kunmap(page);

	host->pg.page = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) /* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static void usdhi6_sg_advance(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) size_t done, total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* New offset: set at the end of the previous block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (host->head_pg.page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) /* Finished a cross-page block, jump to the new page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) host->page_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) host->offset = data->blksz - host->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) host->blk_page = host->pg.mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) usdhi6_sg_unmap(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) host->offset += data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /* The completed block didn't cross a page boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (host->offset == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /* If required, we'll map the page below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) host->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) host->page_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * Now host->blk_page + host->offset point at the end of our last block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * and host->page_idx is the index of the page, in which our new block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * is located, if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) done = (host->page_idx << PAGE_SHIFT) + host->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) total = host->sg->offset + sg_dma_len(host->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) done, total, host->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (done < total && host->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /* More blocks in this page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (host->offset + data->blksz > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) /* We approached at a block, that spans 2 pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) usdhi6_blk_bounce(host, host->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /* Finished current page or an SG segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) usdhi6_sg_unmap(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (done == total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * End of an SG segment or the complete SG: jump to the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * segment, we'll map it later in usdhi6_blk_read() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * usdhi6_blk_write()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) struct scatterlist *next = sg_next(host->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) host->page_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) host->wait = USDHI6_WAIT_FOR_DATA_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) host->sg = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (WARN(next && sg_dma_len(next) % data->blksz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) "SG size %u isn't a multiple of block size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) sg_dma_len(next), data->blksz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) data->error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /* We cannot get here after crossing a page border */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* Next page in the same SG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) host->pg.mapped = kmap(host->pg.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) host->blk_page = host->pg.mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) host->mrq->cmd->opcode, host->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /* DMA handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static void usdhi6_dma_release(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) host->dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (host->chan_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct dma_chan *chan = host->chan_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) host->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) dma_release_channel(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (host->chan_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) struct dma_chan *chan = host->chan_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) host->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) dma_release_channel(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (!host->dma_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) host->dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) dma_unmap_sg(host->chan_rx->device->dev, data->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) data->sg_len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) dma_unmap_sg(host->chan_tx->device->dev, data->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) data->sg_len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) static void usdhi6_dma_complete(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) struct usdhi6_host *host = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dev_name(mmc_dev(host->mmc)), mrq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) mrq->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) usdhi6_dma_stop_unmap(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) enum dma_transfer_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) struct scatterlist *sg = data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) struct dma_async_tx_descriptor *desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) dma_cookie_t cookie = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) enum dma_data_direction data_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) switch (dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) data_dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) data_dir = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) host->dma_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) desc->callback = usdhi6_dma_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) desc->callback_param = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) __func__, data->sg_len, ret, cookie, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (cookie < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /* DMA failed, fall back to PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) ret = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) usdhi6_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) dev_warn(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) "DMA failed: %d, falling back to PIO\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static int usdhi6_dma_start(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (!host->chan_rx || !host->chan_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) if (host->mrq->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) static void usdhi6_dma_kill(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) __func__, data->sg_len, data->blocks, data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /* Abort DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) dmaengine_terminate_all(host->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) dmaengine_terminate_all(host->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) static void usdhi6_dma_check_error(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (host->io_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) data->error = usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) data->bytes_xfered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) usdhi6_dma_kill(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) usdhi6_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) dev_warn(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) "DMA failed: %d, falling back to PIO\n", data->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * The datasheet tells us to check a response from the card, whereas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * responses only come after the command phase, not after the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * phase. Let's check anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) static void usdhi6_dma_kick(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (host->mrq->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) dma_async_issue_pending(host->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) dma_async_issue_pending(host->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct dma_slave_config cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) host->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (IS_ERR(host->chan_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) host->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) cfg.direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) cfg.dst_addr = start + USDHI6_SD_BUF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) cfg.src_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) ret = dmaengine_slave_config(host->chan_tx, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) goto e_release_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) host->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (IS_ERR(host->chan_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) host->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) goto e_release_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) cfg.direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) cfg.src_addr = cfg.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) cfg.dst_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ret = dmaengine_slave_config(host->chan_rx, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) goto e_release_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) e_release_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) dma_release_channel(host->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) host->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) e_release_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) dma_release_channel(host->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) host->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /* API helpers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) unsigned long rate = ios->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) for (i = 1000; i; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) usleep_range(10, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (!i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) unsigned long new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (host->imclk <= rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (ios->timing != MMC_TIMING_UHS_DDR50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) /* Cannot have 1-to-1 clock in DDR mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) new_rate = host->imclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) val |= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) new_rate = host->imclk / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) unsigned long div =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) val |= div >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) new_rate = host->imclk / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (host->rate == new_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) host->rate = new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) rate, (val & 0xff) << 2, new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * if old or new rate is equal to input rate, have to switch the clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * off before changing and on after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (host->imclk == rate || host->imclk == host->rate || !rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) usdhi6_write(host, USDHI6_SD_CLK_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) val & ~USDHI6_SD_CLK_CTRL_SCLKEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (!rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) host->rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (host->imclk == rate || host->imclk == host->rate ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) usdhi6_write(host, USDHI6_SD_CLK_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) val | USDHI6_SD_CLK_CTRL_SCLKEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Errors ignored... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ios->power_mode ? ios->vdd : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static int usdhi6_reset(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) for (i = 1000; i; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return i ? 0 : -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct usdhi6_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) u32 option, mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) case MMC_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) usdhi6_set_power(host, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) usdhi6_only_cd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * We only also touch USDHI6_SD_OPTION from .request(), which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * cannot race with MMC_POWER_UP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ret = usdhi6_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) usdhi6_set_power(host, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) usdhi6_only_cd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) case MMC_POWER_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) option = usdhi6_read(host, USDHI6_SD_OPTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * The eMMC standard only allows 4 or 8 bits in the DDR mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * the same probably holds for SD cards. We check here anyway,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * since the datasheet explicitly requires 4 bits for DDR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (ios->bus_width == MMC_BUS_WIDTH_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (ios->timing == MMC_TIMING_UHS_DDR50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dev_err(mmc_dev(mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) "4 bits are required for DDR\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) option |= USDHI6_SD_OPTION_WIDTH_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) option &= ~USDHI6_SD_OPTION_WIDTH_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) mode = ios->timing == MMC_TIMING_UHS_DDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) usdhi6_write(host, USDHI6_SD_OPTION, option);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) usdhi6_write(host, USDHI6_SDIF_MODE, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (host->rate != ios->clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) usdhi6_clk_set(host, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* This is data timeout. Response timeout is fixed to 640 clock cycles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static void usdhi6_timeout_set(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) unsigned long ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) mrq->data->timeout_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!ticks || ticks > 1 << 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* Max timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) val = 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) else if (ticks < 1 << 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* Min timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) val = order_base_2(ticks) - 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) mrq->data ? "data" : "cmd", ticks, host->rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Timeout Counter mask: 0xf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static void usdhi6_request_done(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (WARN(host->pg.page || host->head_pg.page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) data ? host->offset : 0, data ? data->blocks : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) data ? data->blksz : 0, data ? data->sg_len : 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) usdhi6_sg_unmap(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (mrq->cmd->error ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) (data && data->error) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (mrq->stop && mrq->stop->error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) __func__, mrq->cmd->opcode, data ? data->blocks : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) data ? data->blksz : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) mrq->cmd->error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) data ? data->error : 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) mrq->stop ? mrq->stop->error : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Disable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) host->wait = USDHI6_WAIT_FOR_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) host->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) mmc_request_done(host->mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static int usdhi6_cmd_flags(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct mmc_command *cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) u16 opc = cmd->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (host->app_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) host->app_cmd = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) opc |= USDHI6_SD_CMD_APP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (mrq->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) opc |= USDHI6_SD_CMD_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (mrq->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) opc |= USDHI6_SD_CMD_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) (cmd->opcode == SD_IO_RW_EXTENDED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) mrq->data->blocks > 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) opc |= USDHI6_SD_CMD_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) switch (mmc_resp_type(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) case MMC_RSP_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case MMC_RSP_R1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) opc |= USDHI6_SD_CMD_MODE_RSP_R1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case MMC_RSP_R1B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) case MMC_RSP_R2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) opc |= USDHI6_SD_CMD_MODE_RSP_R2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) case MMC_RSP_R3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) opc |= USDHI6_SD_CMD_MODE_RSP_R3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) dev_warn(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) "Unknown response type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mmc_resp_type(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return opc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static int usdhi6_rq_start(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct mmc_command *cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) int opc = usdhi6_cmd_flags(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (opc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return opc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) for (i = 1000; i; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) usleep_range(10, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) bool use_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) host->page_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) switch (data->blksz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) case 512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) case 256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) data->blksz != 512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) __func__, data->blocks, data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) (cmd->opcode == SD_IO_RW_EXTENDED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) data->blocks > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) usdhi6_sg_prep(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if ((data->blksz >= USDHI6_MIN_DMA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) data->blocks > 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) (data->blksz % 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) data->sg->offset % 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dev_dbg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) data->blksz, data->blocks, data->sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Enable DMA for USDHI6_MIN_DMA bytes or more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) use_dma = data->blksz >= USDHI6_MIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) !(data->blksz % 4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) dev_dbg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) __func__, cmd->opcode, data->blocks, data->blksz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) data->sg_len, use_dma ? "DMA" : "PIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) data->flags & MMC_DATA_READ ? "read" : "write",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) data->sg->offset, mrq->stop ? " + stop" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) __func__, cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* We have to get a command completion interrupt with DMA too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) usdhi6_wait_for_resp(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) host->wait = USDHI6_WAIT_FOR_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) schedule_delayed_work(&host->timeout_work, host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /* SEC bit is required to enable block counting by the core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) usdhi6_write(host, USDHI6_SD_STOP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* Kick command execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) usdhi6_write(host, USDHI6_SD_CMD, opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct usdhi6_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) cancel_delayed_work_sync(&host->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) host->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) host->sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) usdhi6_timeout_set(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ret = usdhi6_rq_start(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mrq->cmd->error = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) usdhi6_request_done(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int usdhi6_get_cd(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct usdhi6_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* Read is atomic, no need to lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * level status.CD CD_ACTIVE_HIGH card present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * 1 0 0 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * 1 0 1 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * 0 1 0 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * 0 1 1 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int usdhi6_get_ro(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct usdhi6_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* No locking as above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * level status.WP RO_ACTIVE_HIGH card read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * 1 0 0 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * 1 0 1 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * 0 1 0 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * 0 1 1 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct usdhi6_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) usdhi6_write(host, USDHI6_SDIO_MODE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) usdhi6_write(host, USDHI6_SDIO_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (IS_ERR(host->pins_uhs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) switch (voltage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) case MMC_SIGNAL_VOLTAGE_180:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case MMC_SIGNAL_VOLTAGE_120:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return pinctrl_select_state(host->pinctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) host->pins_uhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return pinctrl_select_default_state(mmc_dev(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = mmc_regulator_set_vqmmc(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dev_warn_once(mmc_dev(mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) "Failed to set pinstate err=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static const struct mmc_host_ops usdhi6_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .request = usdhi6_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .set_ios = usdhi6_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) .get_cd = usdhi6_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) .get_ro = usdhi6_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) .enable_sdio_irq = usdhi6_enable_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) .start_signal_voltage_switch = usdhi6_sig_volt_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* State machine handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static void usdhi6_resp_cmd12(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct mmc_command *cmd = host->mrq->stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void usdhi6_resp_read(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct mmc_command *cmd = host->mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) u32 *rsp = cmd->resp, tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * RSP10 39-8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * RSP32 71-40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * RSP54 103-72
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * RSP76 127-104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * R2-type response:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * resp[0] = r[127..96]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * resp[1] = r[95..64]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * resp[2] = r[63..32]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * resp[3] = r[31..0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * Other responses:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * resp[0] = r[39..8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (mmc_resp_type(cmd) == MMC_RSP_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) "CMD%d: response expected but is missing!\n", cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (mmc_resp_type(cmd) & MMC_RSP_136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rsp[3 - i] = tmp >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) rsp[3 - i] |= tmp << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* Read RSP54 to avoid conflict with auto CMD12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static int usdhi6_blk_read(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) int i, rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (host->io_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) data->error = usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (host->pg.page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) p = host->blk_page + host->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) p = usdhi6_sg_map(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) data->error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) for (i = 0; i < data->blksz / 4; i++, p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) *p = usdhi6_read(host, USDHI6_SD_BUF0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) rest = data->blksz % 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) for (i = 0; i < (rest + 1) / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ((u8 *)p)[2 * i] = ((u8 *)&d)[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (rest > 1 && !i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) host->wait = USDHI6_WAIT_FOR_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return data->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static int usdhi6_blk_write(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) int i, rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (host->io_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) data->error = usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (host->pg.page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) p = host->blk_page + host->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) p = usdhi6_sg_map(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) data->error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) for (i = 0; i < data->blksz / 4; i++, p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) usdhi6_write(host, USDHI6_SD_BUF0, *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) rest = data->blksz % 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) for (i = 0; i < (rest + 1) / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) u16 d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ((u8 *)&d)[0] = ((u8 *)p)[2 * i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (rest > 1 && !i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) ((u8 *)&d)[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) usdhi6_write16(host, USDHI6_SD_BUF0, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) host->wait = USDHI6_WAIT_FOR_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return data->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static int usdhi6_stop_cmd(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) switch (mrq->cmd->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) case MMC_READ_MULTIPLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case MMC_WRITE_MULTIPLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) host->wait = USDHI6_WAIT_FOR_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) fallthrough; /* Unsupported STOP command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) "unsupported stop CMD%d for CMD%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) mrq->stop->opcode, mrq->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) mrq->stop->error = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static bool usdhi6_end_cmd(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct mmc_command *cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (host->io_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) cmd->error = usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) usdhi6_resp_read(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (!mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (host->dma_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) usdhi6_dma_kick(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (!mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) host->wait = USDHI6_WAIT_FOR_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) else if (usdhi6_stop_cmd(host) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) } else if (mrq->data->flags & MMC_DATA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) (cmd->opcode == SD_IO_RW_EXTENDED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) mrq->data->blocks > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) host->wait = USDHI6_WAIT_FOR_MREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) host->wait = USDHI6_WAIT_FOR_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) (cmd->opcode == SD_IO_RW_EXTENDED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) mrq->data->blocks > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) host->wait = USDHI6_WAIT_FOR_MWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) host->wait = USDHI6_WAIT_FOR_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static bool usdhi6_read_block(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* ACCESS_END IRQ is already unmasked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int ret = usdhi6_blk_read(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * Have to force unmapping both pages: the single block could have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * cross-page, in which case for single-block IO host->page_idx == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * So, if we don't force, the second page won't be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) usdhi6_sg_unmap(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) host->wait = USDHI6_WAIT_FOR_DATA_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static bool usdhi6_mread_block(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int ret = usdhi6_blk_read(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) usdhi6_sg_advance(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return !host->mrq->data->error &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static bool usdhi6_write_block(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) int ret = usdhi6_blk_write(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /* See comment in usdhi6_read_block() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) usdhi6_sg_unmap(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) host->wait = USDHI6_WAIT_FOR_DATA_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) static bool usdhi6_mwrite_block(struct usdhi6_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int ret = usdhi6_blk_write(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) usdhi6_sg_advance(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return !host->mrq->data->error &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* Interrupt & timeout handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct usdhi6_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) bool io_wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) cancel_delayed_work_sync(&host->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (!mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) switch (host->wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) case USDHI6_WAIT_FOR_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* We're too late, the timeout has already kicked in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) case USDHI6_WAIT_FOR_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /* Wait for data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) io_wait = usdhi6_end_cmd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) case USDHI6_WAIT_FOR_MREAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* Wait for more data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) io_wait = usdhi6_mread_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) case USDHI6_WAIT_FOR_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /* Wait for data end? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) io_wait = usdhi6_read_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) case USDHI6_WAIT_FOR_MWRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Wait data to write? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) io_wait = usdhi6_mwrite_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) case USDHI6_WAIT_FOR_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* Wait for data end? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) io_wait = usdhi6_write_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) case USDHI6_WAIT_FOR_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) usdhi6_dma_check_error(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) case USDHI6_WAIT_FOR_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) usdhi6_write(host, USDHI6_SD_STOP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (host->io_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) int ret = usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) mrq->stop->error = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) mrq->data->error = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) usdhi6_resp_cmd12(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) mrq->stop->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) case USDHI6_WAIT_FOR_DATA_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (host->io_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) mrq->data->error = usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) mrq->data->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) cmd->error = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) usdhi6_request_done(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (io_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) schedule_delayed_work(&host->timeout_work, host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* Wait for more data or ACCESS_END */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!host->dma_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (!cmd->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (!data->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (host->wait != USDHI6_WAIT_FOR_STOP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) host->mrq->stop &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) !host->mrq->stop->error &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) !usdhi6_stop_cmd(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* Sending STOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) usdhi6_wait_for_resp(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) schedule_delayed_work(&host->timeout_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) data->bytes_xfered = data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* Data error: might need to unmap the last page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) __func__, data->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) usdhi6_sg_unmap(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) } else if (cmd->opcode == MMC_APP_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) host->app_cmd = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) usdhi6_request_done(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static irqreturn_t usdhi6_sd(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct usdhi6_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) u16 status, status2, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) ~USDHI6_SD_INFO1_CARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) usdhi6_only_cd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) dev_dbg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (!status && !status2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) error = status2 & USDHI6_SD_INFO2_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /* Ack / clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (USDHI6_SD_INFO1_IRQ & status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) usdhi6_write(host, USDHI6_SD_INFO1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 0xffff & ~(USDHI6_SD_INFO1_IRQ & status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (USDHI6_SD_INFO2_IRQ & status2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /* In error cases BWE and BRE aren't cleared automatically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) usdhi6_write(host, USDHI6_SD_INFO2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) host->io_error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) host->irq_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /* Don't pollute the log with unsupported command timeouts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (host->wait != USDHI6_WAIT_FOR_CMD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) error != USDHI6_SD_INFO2_RSP_TOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) dev_warn(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) "%s(): INFO2 error bits 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dev_dbg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) "%s(): INFO2 error bits 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct usdhi6_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) mmc_signal_sdio_irq(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static irqreturn_t usdhi6_cd(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) struct usdhi6_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* We're only interested in hotplug events here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) USDHI6_SD_INFO1_CARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* Ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) usdhi6_write(host, USDHI6_SD_INFO1, ~status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (!work_pending(&mmc->detect.work) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) !mmc->card) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) mmc->card)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) mmc_detect_change(mmc, msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * Actually this should not be needed, if the built-in timeout works reliably in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * the both PIO cases and DMA never fails. But if DMA does fail, a timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * handler might be the only way to catch the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static void usdhi6_timeout_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct delayed_work *d = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct mmc_data *data = mrq ? mrq->data : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) dev_warn(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) host->dma_active ? "DMA" : "PIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) host->wait, mrq ? mrq->cmd->opcode : -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) usdhi6_read(host, USDHI6_SD_INFO1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (host->dma_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) usdhi6_dma_kill(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) usdhi6_dma_stop_unmap(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) switch (host->wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) fallthrough; /* mrq can be NULL, but is impossible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) case USDHI6_WAIT_FOR_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) mrq->cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) case USDHI6_WAIT_FOR_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) mrq->stop->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case USDHI6_WAIT_FOR_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case USDHI6_WAIT_FOR_MREAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case USDHI6_WAIT_FOR_MWRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) case USDHI6_WAIT_FOR_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) case USDHI6_WAIT_FOR_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) sg = host->sg ?: data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) dev_dbg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) host->offset, data->blocks, data->blksz, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) sg_dma_len(sg), sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) usdhi6_sg_unmap(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) fallthrough; /* page unmapped in USDHI6_WAIT_FOR_DATA_END */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) case USDHI6_WAIT_FOR_DATA_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) usdhi6_error_code(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) usdhi6_request_done(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /* Probe / release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static const struct of_device_id usdhi6_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {.compatible = "renesas,usdhi6rol0"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) MODULE_DEVICE_TABLE(of, usdhi6_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static int usdhi6_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct usdhi6_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int irq_cd, irq_sd, irq_sdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (!dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) irq_cd = platform_get_irq_byname(pdev, "card detect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) irq_sd = platform_get_irq_byname(pdev, "data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) irq_sdio = platform_get_irq_byname(pdev, "SDIO");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (irq_sd < 0 || irq_sdio < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) ret = mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) goto e_free_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) ret = mmc_of_parse(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) goto e_free_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) host->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) host->wait = USDHI6_WAIT_FOR_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * We use a fixed timeout of 4s, hence inform the core about it. A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * future improvement should instead respect the cmd->busy_timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) host->pinctrl = devm_pinctrl_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (IS_ERR(host->pinctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) ret = PTR_ERR(host->pinctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto e_free_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) host->base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (IS_ERR(host->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) ret = PTR_ERR(host->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) goto e_free_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) host->clk = devm_clk_get(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (IS_ERR(host->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) ret = PTR_ERR(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) goto e_free_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) host->imclk = clk_get_rate(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ret = clk_prepare_enable(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) goto e_free_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) version = usdhi6_read(host, USDHI6_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if ((version & 0xfff) != 0xa0d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) dev_err(dev, "Version not recognized %x\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) goto e_clk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) usdhi6_mask_all(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (irq_cd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) dev_name(dev), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) goto e_clk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) mmc->caps |= MMC_CAP_NEEDS_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) dev_name(dev), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto e_clk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) dev_name(dev), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) goto e_clk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) usdhi6_dma_request(host, res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) mmc->ops = &usdhi6_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) MMC_CAP_SDIO_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /* Set .max_segs to some random number. Feel free to adjust. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) mmc->max_segs = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) mmc->max_blk_size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * Setting .max_seg_size to 1 page would simplify our page-mapping code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * But OTOH, having large segments makes DMA more efficient. We could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * check, whether we managed to get DMA and fall back to 1 page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * segments, but if we do manage to obtain DMA and then it fails at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * run-time and we fall back to PIO, we will continue getting large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * segments. So, we wouldn't be able to get rid of the code anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) mmc->max_seg_size = mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (!mmc->f_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) mmc->f_max = host->imclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) mmc->f_min = host->imclk / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) platform_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) ret = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) goto e_release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) e_release_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) usdhi6_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) e_clk_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) e_free_mmc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static int usdhi6_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct usdhi6_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) mmc_remove_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) usdhi6_mask_all(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) cancel_delayed_work_sync(&host->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) usdhi6_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) mmc_free_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static struct platform_driver usdhi6_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) .probe = usdhi6_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) .remove = usdhi6_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) .name = "usdhi6rol0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) .probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) .of_match_table = usdhi6_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) module_platform_driver(usdhi6_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) MODULE_ALIAS("platform:usdhi6rol0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");