^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Thanks to the following companies for their support:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * - JMicron (hardware and technical support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/swiotlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/regulator/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/leds.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/mmc/mmc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/mmc/card.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/mmc/sdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/mmc/slot-gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <trace/hooks/mmc_core.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "sdhci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define DRIVER_NAME "sdhci"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define DBG(f, x...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SDHCI_DUMP(f, x...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define MAX_TUNING_LOOP 40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static unsigned int debug_quirks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static unsigned int debug_quirks2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) void sdhci_dumpregs(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) sdhci_readl(host, SDHCI_DMA_ADDRESS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) sdhci_readw(host, SDHCI_HOST_VERSION));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) sdhci_readw(host, SDHCI_BLOCK_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) sdhci_readw(host, SDHCI_BLOCK_COUNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) sdhci_readl(host, SDHCI_ARGUMENT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) sdhci_readw(host, SDHCI_TRANSFER_MODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) sdhci_readl(host, SDHCI_PRESENT_STATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) sdhci_readb(host, SDHCI_HOST_CONTROL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) sdhci_readb(host, SDHCI_POWER_CONTROL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) sdhci_readw(host, SDHCI_CLOCK_CONTROL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) sdhci_readl(host, SDHCI_INT_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) sdhci_readl(host, SDHCI_INT_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) sdhci_readl(host, SDHCI_CAPABILITIES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) sdhci_readl(host, SDHCI_CAPABILITIES_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) sdhci_readw(host, SDHCI_COMMAND),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) sdhci_readl(host, SDHCI_MAX_CURRENT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) sdhci_readl(host, SDHCI_RESPONSE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) sdhci_readl(host, SDHCI_RESPONSE + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) sdhci_readl(host, SDHCI_RESPONSE + 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) sdhci_readl(host, SDHCI_RESPONSE + 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) SDHCI_DUMP("Host ctl2: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) sdhci_readw(host, SDHCI_HOST_CONTROL2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) if (host->flags & SDHCI_USE_64_BIT_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) sdhci_readl(host, SDHCI_ADMA_ERROR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) sdhci_readl(host, SDHCI_ADMA_ADDRESS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) sdhci_readl(host, SDHCI_ADMA_ERROR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) sdhci_readl(host, SDHCI_ADMA_ADDRESS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (host->ops->dump_vendor_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) host->ops->dump_vendor_regs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) SDHCI_DUMP("============================================\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) EXPORT_SYMBOL_GPL(sdhci_dumpregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * Low level functions *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) u16 ctrl2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) if (ctrl2 & SDHCI_CTRL_V4_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) ctrl2 |= SDHCI_CTRL_V4_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * This can be called before sdhci_add_host() by Vendor's host controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * driver to enable v4 mode if supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) void sdhci_enable_v4_mode(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) host->v4_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) sdhci_do_enable_v4_mode(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) return cmd->data || cmd->flags & MMC_RSP_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) u32 present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) SDHCI_CARD_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) host->ier |= present ? SDHCI_INT_CARD_REMOVE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) SDHCI_INT_CARD_INSERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static void sdhci_enable_card_detection(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) sdhci_set_card_detection(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static void sdhci_disable_card_detection(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) sdhci_set_card_detection(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (host->bus_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) host->bus_on = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) pm_runtime_get_noresume(host->mmc->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) if (!host->bus_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) host->bus_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) pm_runtime_put_noidle(host->mmc->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) void sdhci_reset(struct sdhci_host *host, u8 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) ktime_t timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (mask & SDHCI_RESET_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) host->clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) /* Reset-all turns off SD Bus Power */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) sdhci_runtime_pm_bus_off(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) /* Wait max 100 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) timeout = ktime_add_ms(ktime_get(), 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /* hw clears the bit when it's done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) bool timedout = ktime_after(ktime_get(), timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (timedout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) pr_err("%s: Reset 0x%x never completed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) mmc_hostname(host->mmc), (int)mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) EXPORT_SYMBOL_GPL(sdhci_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if (!mmc->ops->get_cd(mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) host->ops->reset(host, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) if (mask & SDHCI_RESET_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) if (host->ops->enable_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) host->ops->enable_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /* Resetting the controller clears many */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) host->preset_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) static void sdhci_set_default_irqs(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) SDHCI_INT_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) host->tuning_mode == SDHCI_TUNING_MODE_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) host->ier |= SDHCI_INT_RETUNE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void sdhci_config_dma(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) u16 ctrl2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if (host->version < SDHCI_SPEC_200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * Always adjust the DMA selection as some controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * (e.g. JMicron) can't do PIO properly when the selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * is ADMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) ctrl &= ~SDHCI_CTRL_DMA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) if (!(host->flags & SDHCI_REQ_USE_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) /* Note if DMA Select is zero then SDMA is selected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) if (host->flags & SDHCI_USE_ADMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) ctrl |= SDHCI_CTRL_ADMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (host->flags & SDHCI_USE_64_BIT_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * If v4 mode, all supported DMA can be 64-bit addressing if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * controller supports 64-bit system address, otherwise only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * ADMA can support 64-bit addressing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (host->v4_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) } else if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * Don't need to undo SDHCI_CTRL_ADMA32 in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * set SDHCI_CTRL_ADMA64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) ctrl |= SDHCI_CTRL_ADMA64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) static void sdhci_init(struct sdhci_host *host, int soft)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) if (soft)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) sdhci_do_reset(host, SDHCI_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (host->v4_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) sdhci_do_enable_v4_mode(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) sdhci_set_default_irqs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) host->cqe_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (soft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) /* force clock reconfiguration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) host->clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) mmc->ops->set_ios(mmc, &mmc->ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) static void sdhci_reinit(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) sdhci_init(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) sdhci_enable_card_detection(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * A change to the card detect bits indicates a change in present state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * refer sdhci_set_card_detection(). A card detect interrupt might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * been missed while the host controller was being reset, so trigger a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * rescan to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) mmc_detect_change(host->mmc, msecs_to_jiffies(200));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static void __sdhci_led_activate(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (host->quirks & SDHCI_QUIRK_NO_LED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) ctrl |= SDHCI_CTRL_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static void __sdhci_led_deactivate(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (host->quirks & SDHCI_QUIRK_NO_LED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) ctrl &= ~SDHCI_CTRL_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) #if IS_REACHABLE(CONFIG_LEDS_CLASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static void sdhci_led_control(struct led_classdev *led,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) enum led_brightness brightness)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct sdhci_host *host = container_of(led, struct sdhci_host, led);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (host->runtime_suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (brightness == LED_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) __sdhci_led_deactivate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) __sdhci_led_activate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) static int sdhci_led_register(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (host->quirks & SDHCI_QUIRK_NO_LED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) snprintf(host->led_name, sizeof(host->led_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) "%s::", mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) host->led.name = host->led_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) host->led.brightness = LED_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) host->led.default_trigger = mmc_hostname(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) host->led.brightness_set = sdhci_led_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) return led_classdev_register(mmc_dev(mmc), &host->led);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static void sdhci_led_unregister(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (host->quirks & SDHCI_QUIRK_NO_LED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) led_classdev_unregister(&host->led);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static inline void sdhci_led_activate(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) static inline void sdhci_led_deactivate(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) static inline int sdhci_led_register(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static inline void sdhci_led_unregister(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static inline void sdhci_led_activate(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) __sdhci_led_activate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) static inline void sdhci_led_deactivate(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) __sdhci_led_deactivate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (sdhci_data_line_cmd(mrq->cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) mod_timer(&host->data_timer, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) mod_timer(&host->timer, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (sdhci_data_line_cmd(mrq->cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) del_timer(&host->data_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) del_timer(&host->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) static inline bool sdhci_has_requests(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) return host->cmd || host->data_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * Core functions *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static void sdhci_read_block_pio(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) size_t blksize, len, chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) u32 scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) u8 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) DBG("PIO reading\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) blksize = host->data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) while (blksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) BUG_ON(!sg_miter_next(&host->sg_miter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) len = min(host->sg_miter.length, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) blksize -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) host->sg_miter.consumed = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) buf = host->sg_miter.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (chunk == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) scratch = sdhci_readl(host, SDHCI_BUFFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) chunk = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) *buf = scratch & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) buf++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) scratch >>= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) chunk--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) sg_miter_stop(&host->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) static void sdhci_write_block_pio(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) size_t blksize, len, chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) u32 scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) u8 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) DBG("PIO writing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) blksize = host->data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) scratch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) while (blksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) BUG_ON(!sg_miter_next(&host->sg_miter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) len = min(host->sg_miter.length, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) blksize -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) host->sg_miter.consumed = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) buf = host->sg_miter.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) scratch |= (u32)*buf << (chunk * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) buf++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) chunk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) sdhci_writel(host, scratch, SDHCI_BUFFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) scratch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) sg_miter_stop(&host->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static void sdhci_transfer_pio(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) if (host->blocks == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (host->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) mask = SDHCI_DATA_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) mask = SDHCI_SPACE_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * Some controllers (JMicron JMB38x) mess up the buffer bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * for transfers < 4 bytes. As long as it is just one block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * we can ignore the bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) (host->data->blocks == 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) mask = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (host->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) sdhci_read_block_pio(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) sdhci_write_block_pio(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) host->blocks--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (host->blocks == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) DBG("PIO transfer complete.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) static int sdhci_pre_dma_transfer(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) struct mmc_data *data, int cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) int sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * If the data buffers are already mapped, return the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * dma_map_sg() result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (data->host_cookie == COOKIE_PRE_MAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return data->sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /* Bounce write requests to the bounce buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (host->bounce_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) unsigned int length = data->blksz * data->blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (length > host->bounce_buffer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) mmc_hostname(host->mmc), length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) host->bounce_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /* Copy the data to the bounce buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (host->ops->copy_to_bounce_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) host->ops->copy_to_bounce_buffer(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) data, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) sg_copy_to_buffer(data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) host->bounce_buffer, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) /* Switch ownership to the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) dma_sync_single_for_device(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) host->bounce_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) host->bounce_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /* Just a dummy value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) sg_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /* Just access the data directly from memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) sg_count = dma_map_sg(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (sg_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) data->sg_count = sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) data->host_cookie = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) local_irq_save(*flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) return kmap_atomic(sg_page(sg)) + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) kunmap_atomic(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) local_irq_restore(*flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) dma_addr_t addr, int len, unsigned int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct sdhci_adma2_64_desc *dma_desc = *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /* 32-bit and 64-bit descriptors have these members in same position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) dma_desc->cmd = cpu_to_le16(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) dma_desc->len = cpu_to_le16(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (host->flags & SDHCI_USE_64_BIT_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) *desc += host->desc_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) void **desc, dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) int len, unsigned int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (host->ops->adma_write_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) host->ops->adma_write_desc(host, desc, addr, len, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) sdhci_adma_write_desc(host, desc, addr, len, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) static void sdhci_adma_mark_end(void *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) struct sdhci_adma2_64_desc *dma_desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* 32-bit and 64-bit descriptors have 'cmd' in same position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) dma_desc->cmd |= cpu_to_le16(ADMA2_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) static void sdhci_adma_table_pre(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct mmc_data *data, int sg_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) dma_addr_t addr, align_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) void *desc, *align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) int len, offset, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * The spec does not specify endianness of descriptor table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * We currently guess that it is LE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) host->sg_count = sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) desc = host->adma_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) align = host->align_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) align_addr = host->align_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) for_each_sg(data->sg, sg, host->sg_count, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * The SDHCI specification states that ADMA addresses must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * be 32-bit aligned. If they aren't, then we use a bounce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * buffer for the (up to three) bytes that screw up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) SDHCI_ADMA2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (data->flags & MMC_DATA_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) buffer = sdhci_kmap_atomic(sg, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) memcpy(align, buffer, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) sdhci_kunmap_atomic(buffer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* tran, valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) __sdhci_adma_write_desc(host, &desc, align_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) offset, ADMA2_TRAN_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) BUG_ON(offset > 65536);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) align += SDHCI_ADMA2_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) align_addr += SDHCI_ADMA2_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) addr += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) len -= offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) BUG_ON(len > 65536);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /* tran, valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) __sdhci_adma_write_desc(host, &desc, addr, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ADMA2_TRAN_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * If this triggers then we have a calculation bug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * somewhere. :/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /* Mark the last descriptor as the terminating descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (desc != host->adma_table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) desc -= host->desc_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) sdhci_adma_mark_end(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* Add a terminating entry - nop, end, valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static void sdhci_adma_table_post(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) void *align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (data->flags & MMC_DATA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) bool has_unaligned = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /* Do a quick scan of the SG list for any unaligned mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) for_each_sg(data->sg, sg, host->sg_count, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) has_unaligned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (has_unaligned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) data->sg_len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) align = host->align_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) for_each_sg(data->sg, sg, host->sg_count, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) size = SDHCI_ADMA2_ALIGN -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) buffer = sdhci_kmap_atomic(sg, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) memcpy(buffer, align, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) sdhci_kunmap_atomic(buffer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) align += SDHCI_ADMA2_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (host->flags & SDHCI_USE_64_BIT_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (host->bounce_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return host->bounce_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return sg_dma_address(host->data->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (host->v4_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) sdhci_set_adma_addr(host, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static unsigned int sdhci_target_timeout(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct mmc_command *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) unsigned int target_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* timeout in us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) target_timeout = cmd->busy_timeout * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (host->clock && data->timeout_clks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) unsigned long long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * data->timeout_clks is in units of clock cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * host->clock is in Hz. target_timeout is in us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * Hence, us = 1000000 * cycles / Hz. Round up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) val = 1000000ULL * data->timeout_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (do_div(val, host->clock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) target_timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) target_timeout += val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return target_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static void sdhci_calc_sw_timeout(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct mmc_data *data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct mmc_ios *ios = &mmc->ios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned char bus_width = 1 << ios->bus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) unsigned int blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) unsigned int freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) u64 target_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) u64 transfer_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) target_timeout = sdhci_target_timeout(host, cmd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) target_timeout *= NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) blksz = data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) freq = host->mmc->actual_clock ? : host->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) do_div(transfer_time, freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* multiply by '2' to account for any unknowns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) transfer_time = transfer_time * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* calculate timeout for the entire data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) host->data_timeout = data->blocks * target_timeout +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) transfer_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) host->data_timeout = target_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (host->data_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) host->data_timeout += MMC_CMD_TRANSFER_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) bool *too_big)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u8 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) unsigned target_timeout, current_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) *too_big = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * If the host controller provides us with an incorrect timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * value, just skip the check and use 0xE. The hardware may take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * longer to time out, but that's much better than having a too-short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * timeout value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return 0xE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Unspecified command, asume max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (cmd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return 0xE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* Unspecified timeout, assume max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!data && !cmd->busy_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return 0xE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* timeout in us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) target_timeout = sdhci_target_timeout(host, cmd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * Figure out needed cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * We do this in steps in order to fit inside a 32 bit int.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * The first step is the minimum timeout, which will have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * minimum resolution of 6 bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * (1) 2^13*1000 > 2^22,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * (2) host->timeout_clk < 2^16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * =>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * (1) / (2) > 2^6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) current_timeout = (1 << 13) * 1000 / host->timeout_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) while (current_timeout < target_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) current_timeout <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (count >= 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (count >= 0xF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) DBG("Too large timeout 0x%x requested for CMD%d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) count, cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) count = 0xE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) *too_big = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static void sdhci_set_transfer_irqs(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (host->flags & SDHCI_REQ_USE_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) host->ier = (host->ier & ~pio_irqs) | dma_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) host->ier = (host->ier & ~dma_irqs) | pio_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) host->ier |= SDHCI_INT_AUTO_CMD_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) host->ier |= SDHCI_INT_DATA_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) bool too_big = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) u8 count = sdhci_calc_timeout(host, cmd, &too_big);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (too_big &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) sdhci_calc_sw_timeout(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sdhci_set_data_timeout_irq(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) sdhci_set_data_timeout_irq(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (host->ops->set_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) host->ops->set_timeout(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) __sdhci_set_timeout(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void sdhci_initialize_data(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) WARN_ON(host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) BUG_ON(data->blksz * data->blocks > 524288);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) BUG_ON(data->blksz > host->mmc->max_blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) BUG_ON(data->blocks > 65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) host->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) host->data_early = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) host->data->bytes_xfered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static inline void sdhci_set_block_info(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* Set the DMA boundary value and block size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) sdhci_writew(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) SDHCI_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * can be supported, in that case 16-bit block count register must be 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct mmc_data *data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) sdhci_initialize_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) unsigned int length_mask, offset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) host->flags |= SDHCI_REQ_USE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * FIXME: This doesn't account for merging when mapping the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * scatterlist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * The assumption here being that alignment and lengths are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * the same after DMA mapping to device address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) length_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) offset_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) length_mask = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * As we use up to 3 byte chunks to work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * around alignment problems, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * check the offset as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) offset_mask = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) length_mask = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) offset_mask = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (unlikely(length_mask | offset_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) for_each_sg(data->sg, sg, data->sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (sg->length & length_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) DBG("Reverting to PIO because of transfer size (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) host->flags &= ~SDHCI_REQ_USE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (sg->offset & offset_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) DBG("Reverting to PIO because of bad alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) host->flags &= ~SDHCI_REQ_USE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (host->flags & SDHCI_REQ_USE_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (sg_cnt <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * This only happens when someone fed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * us an invalid request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) host->flags &= ~SDHCI_REQ_USE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) } else if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) sdhci_adma_table_pre(host, data, sg_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) sdhci_set_adma_addr(host, host->adma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) WARN_ON(sg_cnt != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) sdhci_config_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (!(host->flags & SDHCI_REQ_USE_DMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) flags = SG_MITER_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (host->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) flags |= SG_MITER_TO_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) flags |= SG_MITER_FROM_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) host->blocks = data->blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) sdhci_set_transfer_irqs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) sdhci_set_block_info(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static int sdhci_external_dma_init(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) host->tx_chan = dma_request_chan(mmc->parent, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (IS_ERR(host->tx_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ret = PTR_ERR(host->tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) pr_warn("Failed to request TX DMA channel.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) host->tx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) host->rx_chan = dma_request_chan(mmc->parent, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (IS_ERR(host->rx_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (host->tx_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) dma_release_channel(host->tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) host->tx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ret = PTR_ERR(host->rx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) pr_warn("Failed to request RX DMA channel.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) host->rx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int sdhci_external_dma_setup(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) enum dma_transfer_direction dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct mmc_data *data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct dma_slave_config cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) int sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (!host->mapbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) memset(&cfg, 0, sizeof(cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) cfg.src_addr = host->mapbase + SDHCI_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) cfg.src_maxburst = data->blksz / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) cfg.dst_maxburst = data->blksz / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* Sanity check: all the SG entries must be aligned by block size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) for (i = 0; i < data->sg_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if ((data->sg + i)->length % data->blksz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) chan = sdhci_external_dma_channel(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ret = dmaengine_slave_config(chan, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (sg_cnt <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) desc->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) desc->callback_param = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (dma_submit_error(cookie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ret = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void sdhci_external_dma_release(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (host->tx_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dma_release_channel(host->tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) host->tx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (host->rx_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dma_release_channel(host->rx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) host->rx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) sdhci_switch_external_dma(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct mmc_data *data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) sdhci_initialize_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) host->flags |= SDHCI_REQ_USE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) sdhci_set_transfer_irqs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) sdhci_set_block_info(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!sdhci_external_dma_setup(host, cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) __sdhci_external_dma_prepare_data(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) sdhci_external_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) sdhci_prepare_data(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (!cmd->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) chan = sdhci_external_dma_channel(host, cmd->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) dma_async_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static inline int sdhci_external_dma_init(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static inline void sdhci_external_dma_release(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /* This should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) host->use_external_dma = en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) !mrq->cap_cmd_during_tfr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct mmc_command *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) u16 *mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) (cmd->opcode != SD_IO_RW_EXTENDED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) u16 ctrl2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * In case of Version 4.10 or later, use of 'Auto CMD Auto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * Select' is recommended rather than use of 'Auto CMD12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * here because some controllers (e.g sdhci-of-dwmshc) expect it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) (use_cmd12 || use_cmd23)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) *mode |= SDHCI_TRNS_AUTO_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (use_cmd23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ctrl2 |= SDHCI_CMD23_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ctrl2 &= ~SDHCI_CMD23_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * If we are sending CMD23, CMD12 never gets sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * on successful completion (so no Auto-CMD12).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (use_cmd12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) *mode |= SDHCI_TRNS_AUTO_CMD12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) else if (use_cmd23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) *mode |= SDHCI_TRNS_AUTO_CMD23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static void sdhci_set_transfer_mode(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) u16 mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct mmc_data *data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (host->quirks2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /* must not clear SDHCI_TRANSFER_MODE when tuning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /* clear Auto CMD settings for no data CMDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) WARN_ON(!host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) mode = SDHCI_TRNS_BLK_CNT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) sdhci_auto_cmd_select(host, cmd, &mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (sdhci_auto_cmd23(host, cmd->mrq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) mode |= SDHCI_TRNS_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (host->flags & SDHCI_REQ_USE_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mode |= SDHCI_TRNS_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return (!(host->flags & SDHCI_DEVICE_DEAD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ((mrq->cmd && mrq->cmd->error) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) (mrq->sbc && mrq->sbc->error) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) for (i = 0; i < SDHCI_MAX_MRQS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (host->mrqs_done[i] == mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) for (i = 0; i < SDHCI_MAX_MRQS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!host->mrqs_done[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) host->mrqs_done[i] = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) WARN_ON(i >= SDHCI_MAX_MRQS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (host->cmd && host->cmd->mrq == mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (host->data_cmd && host->data_cmd->mrq == mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) host->data_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) host->deferred_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (host->data && host->data->mrq == mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (sdhci_needs_reset(host, mrq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) host->pending_reset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) sdhci_set_mrq_done(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) sdhci_del_timer(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (!sdhci_has_requests(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) sdhci_led_deactivate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) __sdhci_finish_mrq(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) queue_work(host->complete_wq, &host->complete_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct mmc_command *data_cmd = host->data_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) host->data_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * The controller needs a reset of internal state machines upon error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (data->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!host->cmd || host->cmd == data_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) sdhci_do_reset(host, SDHCI_RESET_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) sdhci_do_reset(host, SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) sdhci_adma_table_post(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * The specification states that the block count register must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * be updated, but it does not specify at what point in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * data flow. That makes the register entirely useless to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * back so we have to assume that nothing made it to the card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * in the event of an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (data->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) data->bytes_xfered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) data->bytes_xfered = data->blksz * data->blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Need to send CMD12 if -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * b) error in multiblock transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (data->stop &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) data->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * 'cap_cmd_during_tfr' request must not use the command line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * after mmc_command_done() has been called. It is upper layer's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * responsibility to send the stop command if required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (data->mrq->cap_cmd_during_tfr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) __sdhci_finish_mrq(host, data->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /* Avoid triggering warning in sdhci_send_command() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (!sdhci_send_command(host, data->stop)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (sw_data_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * This is anyway a sw data timeout, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * give up now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) data->stop->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) __sdhci_finish_mrq(host, data->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) WARN_ON(host->deferred_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) host->deferred_cmd = data->stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) __sdhci_finish_mrq(host, data->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static void sdhci_finish_data(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) __sdhci_finish_data(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) WARN_ON(host->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* Initially, a command has no error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) cmd->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) cmd->opcode == MMC_STOP_TRANSMISSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) cmd->flags |= MMC_RSP_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) mask = SDHCI_CMD_INHIBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (sdhci_data_line_cmd(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) mask |= SDHCI_DATA_INHIBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /* We shouldn't wait for data inihibit for stop commands, even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) though they might use busy signaling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) mask &= ~SDHCI_DATA_INHIBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) host->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) host->data_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (sdhci_data_line_cmd(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) WARN_ON(host->data_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) host->data_cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sdhci_set_timeout(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (cmd->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (host->use_external_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) sdhci_external_dma_prepare_data(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) sdhci_prepare_data(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) sdhci_set_transfer_mode(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) WARN_ONCE(1, "Unsupported response type!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * This does not happen in practice because 136-bit response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * commands never have busy waiting, so rather than complicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * the error path, just remove busy waiting and continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) cmd->flags &= ~MMC_RSP_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (!(cmd->flags & MMC_RSP_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) flags = SDHCI_CMD_RESP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) else if (cmd->flags & MMC_RSP_136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) flags = SDHCI_CMD_RESP_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) else if (cmd->flags & MMC_RSP_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) flags = SDHCI_CMD_RESP_SHORT_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) flags = SDHCI_CMD_RESP_SHORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (cmd->flags & MMC_RSP_CRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) flags |= SDHCI_CMD_CRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (cmd->flags & MMC_RSP_OPCODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) flags |= SDHCI_CMD_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) /* CMD19 is special in that the Data Present Select should be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) flags |= SDHCI_CMD_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) timeout = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (host->data_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) timeout += nsecs_to_jiffies(host->data_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) else if (!cmd->data && cmd->busy_timeout > 9000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) timeout += 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) sdhci_mod_timer(host, cmd->mrq, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (host->use_external_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) sdhci_external_dma_pre_transfer(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static bool sdhci_present_error(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct mmc_command *cmd, bool present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!present || host->flags & SDHCI_DEVICE_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) cmd->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static bool sdhci_send_command_retry(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct mmc_command *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) __releases(host->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) __acquires(host->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) struct mmc_command *deferred_cmd = host->deferred_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) int timeout = 10; /* Approx. 10 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) bool present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) while (!sdhci_send_command(host, cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (!timeout--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pr_err("%s: Controller never released inhibit bit(s).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) cmd->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) usleep_range(1000, 1250);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) present = host->mmc->ops->get_cd(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /* A deferred command might disappear, handle that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (cmd == deferred_cmd && cmd != host->deferred_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (sdhci_present_error(host, cmd, present))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (cmd == host->deferred_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) host->deferred_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) int i, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) reg = SDHCI_RESPONSE + (3 - i) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) cmd->resp[i] = sdhci_readl(host, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /* CRC is stripped so we need to do some shifting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) cmd->resp[i] <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (i != 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) cmd->resp[i] |= cmd->resp[i + 1] >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static void sdhci_finish_command(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct mmc_command *cmd = host->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (cmd->flags & MMC_RSP_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (cmd->flags & MMC_RSP_136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) sdhci_read_rsp_136(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) mmc_command_done(host->mmc, cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * The host can send and interrupt when the busy state has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * ended, allowing us to wait without wasting CPU cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * The busy signal uses DAT0 so this is similar to waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * for data to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * Note: The 1.0 specification is a bit ambiguous about this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * feature so there might be some problems with older
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (cmd->flags & MMC_RSP_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (cmd->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) DBG("Cannot wait for busy signal when also doing a data transfer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) cmd == host->data_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /* Command complete before busy is ended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* Finished CMD23, now send actual command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (cmd == cmd->mrq->sbc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (!sdhci_send_command(host, cmd->mrq->cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) WARN_ON(host->deferred_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) host->deferred_cmd = cmd->mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* Processed actual command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (host->data && host->data_early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) sdhci_finish_data(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (!cmd->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) __sdhci_finish_mrq(host, cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static u16 sdhci_get_preset_value(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) u16 preset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) switch (host->timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) case MMC_TIMING_MMC_HS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) case MMC_TIMING_SD_HS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) case MMC_TIMING_UHS_SDR12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) case MMC_TIMING_UHS_SDR25:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) case MMC_TIMING_UHS_SDR50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) case MMC_TIMING_UHS_SDR104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) case MMC_TIMING_MMC_HS200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) case MMC_TIMING_UHS_DDR50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) case MMC_TIMING_MMC_DDR52:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) case MMC_TIMING_MMC_HS400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) pr_warn("%s: Invalid UHS-I mode selected\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return preset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) unsigned int *actual_clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) int div = 0; /* Initialized for compiler warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int real_div = div, clk_mul = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) u16 clk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) bool switch_base_clk = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (host->version >= SDHCI_SPEC_300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (host->preset_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) u16 pre_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) pre_val = sdhci_get_preset_value(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (host->clk_mul &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) clk = SDHCI_PROG_CLOCK_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) real_div = div + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) clk_mul = host->clk_mul;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) real_div = max_t(int, 1, div << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) goto clock_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * Check if the Host Controller supports Programmable Clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * Mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (host->clk_mul) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) for (div = 1; div <= 1024; div++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if ((host->max_clk * host->clk_mul / div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) <= clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if ((host->max_clk * host->clk_mul / div) <= clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * Set Programmable Clock Mode in the Clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * Control register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) clk = SDHCI_PROG_CLOCK_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) real_div = div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) clk_mul = host->clk_mul;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) div--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * Divisor can be too small to reach clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * speed requirement. Then use the base clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) switch_base_clk = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (!host->clk_mul || switch_base_clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /* Version 3.00 divisors must be a multiple of 2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (host->max_clk <= clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) div += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if ((host->max_clk / div) <= clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) real_div = div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) div >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) && !div && host->max_clk <= 25000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /* Version 2.00 divisors must be a power of 2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if ((host->max_clk / div) <= clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) real_div = div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) div >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) clock_set:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (real_div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) *actual_clock = (host->max_clk * clk_mul) / real_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) << SDHCI_DIVIDER_HI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) EXPORT_SYMBOL_GPL(sdhci_calc_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) ktime_t timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) clk |= SDHCI_CLOCK_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /* Wait max 150 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) timeout = ktime_add_ms(ktime_get(), 150);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) bool timedout = ktime_after(ktime_get(), timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (clk & SDHCI_CLOCK_INT_STABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (timedout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) pr_err("%s: Internal clock never stabilised.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) clk |= SDHCI_CLOCK_PLL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) clk &= ~SDHCI_CLOCK_INT_STABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) /* Wait max 150 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) timeout = ktime_add_ms(ktime_get(), 150);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) bool timedout = ktime_after(ktime_get(), timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (clk & SDHCI_CLOCK_INT_STABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (timedout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) pr_err("%s: PLL clock never stabilised.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) clk |= SDHCI_CLOCK_CARD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) EXPORT_SYMBOL_GPL(sdhci_enable_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) u16 clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) host->mmc->actual_clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (clock == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) sdhci_enable_clk(host, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) EXPORT_SYMBOL_GPL(sdhci_set_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) unsigned short vdd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (mode != MMC_POWER_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) unsigned short vdd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) u8 pwr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (mode != MMC_POWER_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) switch (1 << vdd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) case MMC_VDD_165_195:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * Without a regulator, SDHCI does not support 2.0v
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * so we only get here if the driver deliberately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) * added the 2.0v range to ocr_avail. Map it to 1.8v
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * for the purpose of turning on the power.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) case MMC_VDD_20_21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) pwr = SDHCI_POWER_180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) case MMC_VDD_29_30:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) case MMC_VDD_30_31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) pwr = SDHCI_POWER_300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) case MMC_VDD_32_33:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) case MMC_VDD_33_34:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * 3.4 ~ 3.6V are valid only for those platforms where it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * known that the voltage range is supported by hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) case MMC_VDD_34_35:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) case MMC_VDD_35_36:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) pwr = SDHCI_POWER_330;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) WARN(1, "%s: Invalid vdd %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) mmc_hostname(host->mmc), vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (host->pwr == pwr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) host->pwr = pwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (pwr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) sdhci_runtime_pm_bus_off(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * Spec says that we should clear the power reg before setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * a new value. Some controllers don't seem to like this though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * At least the Marvell CaFe chip gets confused if we set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * voltage and set turn on power at the same time, so set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * voltage first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) pwr |= SDHCI_POWER_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) sdhci_runtime_pm_bus_on(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * Some controllers need an extra 10ms delay of 10ms before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * they can apply clock after applying power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) unsigned short vdd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (IS_ERR(host->mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) sdhci_set_power_noreg(host, mode, vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) sdhci_set_power_reg(host, mode, vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) EXPORT_SYMBOL_GPL(sdhci_set_power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) * Some controllers need to configure a valid bus voltage on their power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * register regardless of whether an external regulator is taking care of power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * supply. This helper function takes care of it if set as the controller's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * sdhci_ops.set_power callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) unsigned char mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) unsigned short vdd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (!IS_ERR(host->mmc->supply.vmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) sdhci_set_power_noreg(host, mode, vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * MMC callbacks *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) bool present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /* Firstly check card presence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) present = mmc->ops->get_cd(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) sdhci_led_activate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (sdhci_present_error(host, mrq->cmd, present))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (!sdhci_send_command_retry(host, cmd, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) sdhci_finish_mrq(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) EXPORT_SYMBOL_GPL(sdhci_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (sdhci_present_error(host, mrq->cmd, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) sdhci_finish_mrq(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * The HSQ may send a command in interrupt context without polling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * the busy signaling, which means we should return BUSY if controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * has not released inhibit bits to allow HSQ trying to send request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * again in non-atomic context. So we should not finish this request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (!sdhci_send_command(host, cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) sdhci_led_activate(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) EXPORT_SYMBOL_GPL(sdhci_request_atomic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) void sdhci_set_bus_width(struct sdhci_host *host, int width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (width == MMC_BUS_WIDTH_8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) ctrl &= ~SDHCI_CTRL_4BITBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) ctrl |= SDHCI_CTRL_8BITBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) ctrl &= ~SDHCI_CTRL_8BITBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (width == MMC_BUS_WIDTH_4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) ctrl |= SDHCI_CTRL_4BITBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) ctrl &= ~SDHCI_CTRL_4BITBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) u16 ctrl_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) /* Select Bus Speed Mode for host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if ((timing == MMC_TIMING_MMC_HS200) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) (timing == MMC_TIMING_UHS_SDR104))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) else if (timing == MMC_TIMING_UHS_SDR12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) else if (timing == MMC_TIMING_UHS_SDR25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) else if (timing == MMC_TIMING_UHS_SDR50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) else if ((timing == MMC_TIMING_UHS_DDR50) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) (timing == MMC_TIMING_MMC_DDR52))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) else if (timing == MMC_TIMING_MMC_HS400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (ios->power_mode == MMC_POWER_UNDEFINED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (host->flags & SDHCI_DEVICE_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (!IS_ERR(mmc->supply.vmmc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ios->power_mode == MMC_POWER_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) * Reset the chip on each power off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * Should clear out any weird states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (ios->power_mode == MMC_POWER_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) sdhci_reinit(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (host->version >= SDHCI_SPEC_300 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) (ios->power_mode == MMC_POWER_UP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) sdhci_enable_preset_value(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (!ios->clock || ios->clock != host->clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) host->ops->set_clock(host, ios->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) host->clock = ios->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) host->clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) host->timeout_clk = host->mmc->actual_clock ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) host->mmc->actual_clock / 1000 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) host->clock / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) host->mmc->max_busy_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) host->ops->get_max_timeout_count ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) host->ops->get_max_timeout_count(host) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 1 << 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) host->mmc->max_busy_timeout /= host->timeout_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (host->ops->set_power)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) host->ops->set_power(host, ios->power_mode, ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) sdhci_set_power(host, ios->power_mode, ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) if (host->ops->platform_send_init_74_clocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) host->ops->platform_send_init_74_clocks(host, ios->power_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) host->ops->set_bus_width(host, ios->bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (ios->timing == MMC_TIMING_SD_HS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) ios->timing == MMC_TIMING_MMC_HS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) ios->timing == MMC_TIMING_MMC_HS400 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) ios->timing == MMC_TIMING_MMC_HS200 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) ios->timing == MMC_TIMING_MMC_DDR52 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) ios->timing == MMC_TIMING_UHS_SDR50 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) ios->timing == MMC_TIMING_UHS_SDR104 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) ios->timing == MMC_TIMING_UHS_DDR50 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) ios->timing == MMC_TIMING_UHS_SDR25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) ctrl |= SDHCI_CTRL_HISPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) ctrl &= ~SDHCI_CTRL_HISPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (host->version >= SDHCI_SPEC_300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) u16 clk, ctrl_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (!host->preset_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * We only need to set Driver Strength if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * preset value enable is not set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) pr_warn("%s: invalid driver type, default to driver type B\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * According to SDHC Spec v3.00, if the Preset Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * Enable in the Host Control 2 register is set, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * need to reset SD Clock Enable before changing High
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * Speed Enable to avoid generating clock gliches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) /* Reset SD Clock Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) clk &= ~SDHCI_CLOCK_CARD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* Re-enable SD Clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) host->ops->set_clock(host, host->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /* Reset SD Clock Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) clk &= ~SDHCI_CLOCK_CARD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) host->ops->set_uhs_signaling(host, ios->timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) host->timing = ios->timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) ((ios->timing == MMC_TIMING_UHS_SDR12) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) (ios->timing == MMC_TIMING_UHS_SDR25) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) (ios->timing == MMC_TIMING_UHS_SDR50) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) (ios->timing == MMC_TIMING_UHS_SDR104) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) (ios->timing == MMC_TIMING_UHS_DDR50) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) (ios->timing == MMC_TIMING_MMC_DDR52))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) u16 preset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) sdhci_enable_preset_value(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) preset = sdhci_get_preset_value(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) preset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) /* Re-enable SD Clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) host->ops->set_clock(host, host->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * Some (ENE) controllers go apeshit on some ios operation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) * signalling timeout and CRC errors even on CMD0. Resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * it on each ios seems to solve the problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) EXPORT_SYMBOL_GPL(sdhci_set_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) static int sdhci_get_cd(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) int gpio_cd = mmc_gpio_get_cd(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) bool allow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (host->flags & SDHCI_DEVICE_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) /* If nonremovable, assume that the card is always present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (!mmc_card_is_removable(host->mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) trace_android_vh_sdhci_get_cd(host, &allow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (!allow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) * Try slot gpio detect, if defined it take precedence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) * over build in controller functionality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if (gpio_cd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) return !!gpio_cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /* If polling, assume that the card is always present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /* Host native card detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) static int sdhci_check_ro(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) int is_readonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (host->flags & SDHCI_DEVICE_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) is_readonly = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) else if (host->ops->get_ro)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) is_readonly = host->ops->get_ro(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) else if (mmc_can_gpio_ro(host->mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) is_readonly = mmc_gpio_get_ro(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) & SDHCI_WRITE_PROTECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) /* This quirk needs to be replaced by a callback-function later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) !is_readonly : is_readonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) #define SAMPLE_COUNT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) static int sdhci_get_ro(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) int i, ro_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) return sdhci_check_ro(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) ro_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) for (i = 0; i < SAMPLE_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (sdhci_check_ro(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (++ro_count > SAMPLE_COUNT / 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) msleep(30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static void sdhci_hw_reset(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (host->ops && host->ops->hw_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) host->ops->hw_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (!(host->flags & SDHCI_DEVICE_DEAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) host->ier |= SDHCI_INT_CARD_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) host->ier &= ~SDHCI_INT_CARD_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) pm_runtime_get_noresume(host->mmc->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) sdhci_enable_sdio_irq_nolock(host, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (!enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) pm_runtime_put_noidle(host->mmc->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) sdhci_enable_sdio_irq_nolock(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) u16 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) * Signal Voltage Switching is only applicable for Host Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * v3.00 and above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (host->version < SDHCI_SPEC_300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) switch (ios->signal_voltage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) case MMC_SIGNAL_VOLTAGE_330:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (!(host->flags & SDHCI_SIGNALING_330))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) ctrl &= ~SDHCI_CTRL_VDD_180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) ret = mmc_regulator_set_vqmmc(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /* Wait for 5ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) usleep_range(5000, 5500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) /* 3.3V regulator output should be stable within 5 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (!(ctrl & SDHCI_CTRL_VDD_180))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) pr_warn("%s: 3.3V regulator output did not become stable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) case MMC_SIGNAL_VOLTAGE_180:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (!(host->flags & SDHCI_SIGNALING_180))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) ret = mmc_regulator_set_vqmmc(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) * Enable 1.8V Signal Enable in the Host Control2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) * register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) ctrl |= SDHCI_CTRL_VDD_180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) /* Some controller need to do more when switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (host->ops->voltage_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) host->ops->voltage_switch(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) /* 1.8V regulator output should be stable within 5 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (ctrl & SDHCI_CTRL_VDD_180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) pr_warn("%s: 1.8V regulator output did not become stable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) case MMC_SIGNAL_VOLTAGE_120:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (!(host->flags & SDHCI_SIGNALING_120))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) ret = mmc_regulator_set_vqmmc(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) /* No signal voltage switch required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) static int sdhci_card_busy(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) u32 present_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) /* Check whether DAT[0] is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return !(present_state & SDHCI_DATA_0_LVL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) host->flags |= SDHCI_HS400_TUNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) void sdhci_start_tuning(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) u16 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) ctrl |= SDHCI_CTRL_EXEC_TUNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) ctrl |= SDHCI_CTRL_TUNED_CLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * As per the Host Controller spec v3.00, tuning command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * generates Buffer Read Ready interrupt, so enable that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) * Note: The spec clearly says that when tuning sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) * is being performed, the controller does not generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) * interrupts other than Buffer Read Ready interrupt. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) * to make sure we don't hit a controller bug, we _only_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) * enable Buffer Read Ready interrupt here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) EXPORT_SYMBOL_GPL(sdhci_start_tuning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) void sdhci_end_tuning(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) EXPORT_SYMBOL_GPL(sdhci_end_tuning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) void sdhci_reset_tuning(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) u16 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ctrl &= ~SDHCI_CTRL_TUNED_CLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) sdhci_reset_tuning(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) sdhci_do_reset(host, SDHCI_RESET_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) sdhci_do_reset(host, SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) sdhci_end_tuning(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) mmc_abort_tuning(host->mmc, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * tuning command does not have a data payload (or rather the hardware does it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * interrupt setup is different to other commands and there is no timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * interrupt so special handling is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) struct mmc_command cmd = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct mmc_request mrq = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) u32 b = host->sdma_boundary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) cmd.opcode = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) cmd.mrq = &mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) mrq.cmd = &cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) * In response to CMD19, the card sends 64 bytes of tuning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) * block to the Host Controller. So we set the block size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) * to 64 here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) mmc->ios.bus_width == MMC_BUS_WIDTH_8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * The tuning block is sent by the card to the host controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) * So we set the TRNS_READ bit in the Transfer Mode register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) * This also takes care of setting DMA Enable and Multi Block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) * Select in the same register to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (!sdhci_send_command_retry(host, &cmd, flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) host->tuning_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) sdhci_del_timer(host, &mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) host->tuning_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /* Wait for Buffer Read Ready interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) msecs_to_jiffies(50));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) EXPORT_SYMBOL_GPL(sdhci_send_tuning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * of loops reaches tuning loop count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) for (i = 0; i < host->tuning_loop_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) u16 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) sdhci_send_tuning(host, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (!host->tuning_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) sdhci_abort_tuning(host, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) /* Spec does not require a delay between tuning cycles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (host->tuning_delay > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) mdelay(host->tuning_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (ctrl & SDHCI_CTRL_TUNED_CLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) return 0; /* Success! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) sdhci_reset_tuning(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
/*
 * mmc_host_ops .execute_tuning callback: decide whether the current timing
 * mode needs tuning, then run either the platform-specific tuning hook or
 * the standard SDHCI tuning sequence. Returns 0 on success or a negative
 * error code; the result is also cached in host->tuning_err.
 */
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	/* Only tuning mode 1 supports periodic re-tuning */
	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		fallthrough;

	default:
		/* Timing mode does not require tuning; return success */
		goto out;
	}

	/* Platform hook, if present, replaces the standard tuning sequence */
	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	/* 0 disables periodic re-tuning */
	host->mmc->retune_period = tuning_count;

	/*
	 * A negative tuning_delay means "not set"; default to a 1 ms delay
	 * for SD tuning (CMD19) and no delay otherwise.
	 */
	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	host->tuning_err = __sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	/* One-shot flag: always consumed, whichever path was taken */
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) /* Host Controller v3.00 defines preset value registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) if (host->version < SDHCI_SPEC_300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) * We only enable or disable Preset Value if they are not already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * enabled or disabled respectively. Otherwise, we bail out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (host->preset_enabled != enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) host->flags |= SDHCI_PV_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) host->flags &= ~SDHCI_PV_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) host->preset_enabled = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (data->host_cookie != COOKIE_UNMAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) data->host_cookie = COOKIE_UNMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) mrq->data->host_cookie = COOKIE_UNMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) * No pre-mapping in the pre hook if we're using the bounce buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) * for that we would need two bounce buffers since one buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) * in flight when this is getting called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (host->data_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) host->data_cmd->error = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) sdhci_finish_mrq(host, host->data_cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (host->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) host->cmd->error = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) sdhci_finish_mrq(host, host->cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) static void sdhci_card_event(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) int present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) /* First check if client has provided their own card event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (host->ops->card_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) host->ops->card_event(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) present = mmc->ops->get_cd(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) /* Check sdhci_has_requests() first in case we are runtime suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) if (sdhci_has_requests(host) && !present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) pr_err("%s: Card removed during transfer!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) pr_err("%s: Resetting controller.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) sdhci_do_reset(host, SDHCI_RESET_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) sdhci_do_reset(host, SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) sdhci_error_out_mrqs(host, -ENOMEDIUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
/*
 * Default mmc_host_ops for SDHCI hosts; wires the MMC core's callbacks to
 * the generic SDHCI implementations in this file.
 */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.ack_sdio_irq    = sdhci_ack_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) * Request done *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
/*
 * Complete at most one finished request from host->mrqs_done[].
 *
 * Returns true when there is nothing (more) to do right now — either no
 * finished request was found, or completion must wait for the command and
 * data lines to become free for reset. Returns false after completing one
 * request, in which case the caller should loop and call again.
 */
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	/* Pick the first finished request; remember its slot index i */
	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/*
		 * Spec says we should do both at the same time, but Ricoh
		 * controllers do not like that.
		 */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (host->use_external_dma && data &&
		    (mrq->cmd->error || data->error)) {
			struct dma_chan *chan = sdhci_external_dma_channel(host, data);

			/*
			 * Drop the lock while terminating the external DMA
			 * channel (dmaengine_terminate_sync may sleep), then
			 * re-mark this mrq as done since another context could
			 * have raced with us meanwhile.
			 */
			host->mrqs_done[i] = NULL;
			spin_unlock_irqrestore(&host->lock, flags);
			dmaengine_terminate_sync(chan);
			spin_lock_irqsave(&host->lock, flags);
			sdhci_set_mrq_done(host, mrq);
		}

		if (data && data->host_cookie == COOKIE_MAPPED) {
			if (host->bounce_buffer) {
				/*
				 * On reads, copy the bounced data into the
				 * sglist
				 */
				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
					unsigned int length = data->bytes_xfered;

					if (length > host->bounce_buffer_size) {
						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
						       mmc_hostname(host->mmc),
						       host->bounce_buffer_size,
						       data->bytes_xfered);
						/* Cap it down and continue */
						length = host->bounce_buffer_size;
					}
					dma_sync_single_for_cpu(
						host->mmc->parent,
						host->bounce_addr,
						host->bounce_buffer_size,
						DMA_FROM_DEVICE);
					sg_copy_from_buffer(data->sg,
						data->sg_len,
						host->bounce_buffer,
						length);
				} else {
					/* No copying, just switch ownership */
					dma_sync_single_for_cpu(
						host->mmc->parent,
						host->bounce_addr,
						host->bounce_buffer_size,
						mmc_get_dma_dir(data));
				}
			} else {
				/* Unmap the raw data */
				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len,
					     mmc_get_dma_dir(data));
			}
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/* Release the slot before notifying the core */
	host->mrqs_done[i] = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	if (host->ops->request_done)
		host->ops->request_done(host, mrq);
	else
		mmc_request_done(host->mmc, mrq);

	return false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) static void sdhci_complete_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct sdhci_host *host = container_of(work, struct sdhci_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) complete_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) while (!sdhci_request_done(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) static void sdhci_timeout_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) struct sdhci_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) host = from_timer(host, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) host->cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) sdhci_finish_mrq(host, host->cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
/*
 * Software timeout for data transfers and data-line commands. On expiry,
 * dump the registers and fail whichever stage is outstanding, checked in
 * order: active data transfer, then data command awaiting completion, then
 * a pending command that uses the data lines.
 */
static void sdhci_timeout_data_timer(struct timer_list *t)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = from_timer(host, t, data_timer);

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			/* 'true' = software timeout path */
			__sdhci_finish_data(host, true);
			queue_work(host->complete_wq, &host->complete_work);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			/* host->cmd is non-NULL here by the condition above */
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) * Interrupt handling *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) /* Handle auto-CMD12 error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) struct mmc_request *mrq = host->data_cmd->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) SDHCI_INT_DATA_TIMEOUT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) SDHCI_INT_DATA_CRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) /* Treat auto-CMD12 error the same as data error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) *intmask_p |= data_err_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (!host->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) * SDHCI recovers from errors by resetting the cmd and data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) * circuits. Until that is done, there very well might be more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) * interrupts, so ignore them in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (host->pending_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) mmc_hostname(host->mmc), (unsigned)intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) if (intmask & SDHCI_INT_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) host->cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) host->cmd->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) /* Treat data command CRC error the same as data CRC error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) if (host->cmd->data &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) SDHCI_INT_CRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) *intmask_p |= SDHCI_INT_DATA_CRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) __sdhci_finish_mrq(host, host->cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) /* Handle auto-CMD23 error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) struct mmc_request *mrq = host->cmd->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) -ETIMEDOUT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) mrq->sbc->error = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) __sdhci_finish_mrq(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) if (intmask & SDHCI_INT_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) sdhci_finish_command(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) static void sdhci_adma_show_error(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) void *desc = host->adma_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) dma_addr_t dma = host->adma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) struct sdhci_adma2_64_desc *dma_desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (host->flags & SDHCI_USE_64_BIT_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) (unsigned long long)dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) le32_to_cpu(dma_desc->addr_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) le32_to_cpu(dma_desc->addr_lo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) le16_to_cpu(dma_desc->len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) le16_to_cpu(dma_desc->cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) (unsigned long long)dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) le32_to_cpu(dma_desc->addr_lo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) le16_to_cpu(dma_desc->len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) le16_to_cpu(dma_desc->cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) desc += host->desc_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) dma += host->desc_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) u32 command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) /* CMD19 generates _only_ Buffer Read Ready interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (intmask & SDHCI_INT_DATA_AVAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (command == MMC_SEND_TUNING_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) command == MMC_SEND_TUNING_BLOCK_HS200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) host->tuning_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) wake_up(&host->buf_ready_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) if (!host->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) struct mmc_command *data_cmd = host->data_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) * The "data complete" interrupt is also used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) * indicate that a busy state has ended. See comment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) * above in sdhci_cmd_irq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (intmask & SDHCI_INT_DATA_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) host->data_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) data_cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) __sdhci_finish_mrq(host, data_cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (intmask & SDHCI_INT_DATA_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) host->data_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) * Some cards handle busy-end interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) * before the command completed, so make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) * sure we do things in the proper order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) if (host->cmd == data_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) __sdhci_finish_mrq(host, data_cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) * SDHCI recovers from errors by resetting the cmd and data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) * circuits. Until that is done, there very well might be more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) * interrupts, so ignore them in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (host->pending_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) mmc_hostname(host->mmc), (unsigned)intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) if (intmask & SDHCI_INT_DATA_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) host->data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) else if (intmask & SDHCI_INT_DATA_END_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) host->data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) else if ((intmask & SDHCI_INT_DATA_CRC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) != MMC_BUS_TEST_R)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) host->data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) else if (intmask & SDHCI_INT_ADMA_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) sdhci_adma_show_error(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) host->data->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (host->ops->adma_workaround)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) host->ops->adma_workaround(host, intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) if (host->data->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) sdhci_finish_data(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) sdhci_transfer_pio(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) * We currently don't do anything fancy with DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) * boundaries, but as we can't disable the feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) * we need to at least restart the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) * should return a valid address to continue from, but as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) * some controllers are faulty, don't trust them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) if (intmask & SDHCI_INT_DMA_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) dma_addr_t dmastart, dmanow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) dmastart = sdhci_sdma_address(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) dmanow = dmastart + host->data->bytes_xfered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) * Force update to the next DMA block boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) dmanow = (dmanow &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) SDHCI_DEFAULT_BOUNDARY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) host->data->bytes_xfered = dmanow - dmastart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) &dmastart, host->data->bytes_xfered, &dmanow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) sdhci_set_sdma_addr(host, dmanow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (intmask & SDHCI_INT_DATA_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) if (host->cmd == host->data_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) * Data managed to finish before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) * command completed. Make sure we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * things in the proper order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) host->data_early = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) sdhci_finish_data(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) static inline bool sdhci_defer_done(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) return host->pending_reset || host->always_defer_done ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) ((host->flags & SDHCI_REQ_USE_DMA) && data &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) data->host_cookie == COOKIE_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) static irqreturn_t sdhci_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) irqreturn_t result = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) struct sdhci_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) u32 intmask, mask, unexpected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) int max_loops = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) if (host->runtime_suspended) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) intmask = sdhci_readl(host, SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) if (!intmask || intmask == 0xffffffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) result = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) DBG("IRQ status 0x%08x\n", intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (host->ops->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) intmask = host->ops->irq(host, intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (!intmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) /* Clear selected interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) SDHCI_INT_BUS_POWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) sdhci_writel(host, mask, SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) SDHCI_CARD_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) * There is a observation on i.mx esdhc. INSERT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) * bit will be immediately set again when it gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) * cleared, if a card is inserted. We have to mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) * the irq to prevent interrupt storm which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) * freeze the system. And the REMOVE gets the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) * same situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) * More testing are needed here to ensure it works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) * for other platforms though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) host->ier &= ~(SDHCI_INT_CARD_INSERT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) SDHCI_INT_CARD_REMOVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) host->ier |= present ? SDHCI_INT_CARD_REMOVE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) SDHCI_INT_CARD_INSERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) SDHCI_INT_CARD_REMOVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) result = IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) if (intmask & SDHCI_INT_CMD_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (intmask & SDHCI_INT_DATA_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (intmask & SDHCI_INT_BUS_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) pr_err("%s: Card is consuming too much power!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) if (intmask & SDHCI_INT_RETUNE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) mmc_retune_needed(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if ((intmask & SDHCI_INT_CARD_INT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) (host->ier & SDHCI_INT_CARD_INT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) sdhci_enable_sdio_irq_nolock(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) sdio_signal_irq(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) if (intmask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) unexpected |= intmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) sdhci_writel(host, intmask, SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) cont:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (result == IRQ_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) result = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) intmask = sdhci_readl(host, SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) } while (intmask && --max_loops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) /* Determine if mrqs can be completed immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) for (i = 0; i < SDHCI_MAX_MRQS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) struct mmc_request *mrq = host->mrqs_done[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) if (!mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) if (sdhci_defer_done(host, mrq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) result = IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) mrqs_done[i] = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) host->mrqs_done[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (host->deferred_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) result = IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) /* Process mrqs ready for immediate completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) for (i = 0; i < SDHCI_MAX_MRQS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if (!mrqs_done[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) if (host->ops->request_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) host->ops->request_done(host, mrqs_done[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) mmc_request_done(host->mmc, mrqs_done[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) if (unexpected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) pr_err("%s: Unexpected interrupt 0x%08x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) mmc_hostname(host->mmc), unexpected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) struct sdhci_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) u32 isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) while (!sdhci_request_done(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) isr = host->thread_isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) host->thread_isr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) cmd = host->deferred_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (cmd && !sdhci_send_command_retry(host, cmd, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) sdhci_finish_mrq(host, cmd->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) mmc->ops->card_event(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) mmc_detect_change(mmc, msecs_to_jiffies(200));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) * Suspend/resume *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) return mmc_card_is_removable(host->mmc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) !mmc_can_gpio_cd(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) * To enable wakeup events, the corresponding events have to be enabled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) * Table' in the SD Host Controller Standard Specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) * It is useless to restore SDHCI_INT_ENABLE state in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) * sdhci_disable_irq_wakeups() since it will be set by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) * sdhci_enable_card_detection() or sdhci_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) SDHCI_WAKE_ON_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) u32 irq_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) u8 wake_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) if (sdhci_cd_irq_can_wakeup(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) if (mmc_card_wake_sdio_irq(host->mmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) wake_val |= SDHCI_WAKE_ON_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) irq_val |= SDHCI_INT_CARD_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) if (!irq_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) val |= wake_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) host->irq_wake_enabled = !enable_irq_wake(host->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) return host->irq_wake_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) | SDHCI_WAKE_ON_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) disable_irq_wake(host->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) host->irq_wake_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) int sdhci_suspend_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) sdhci_disable_card_detection(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) mmc_retune_timer_stop(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (!device_may_wakeup(mmc_dev(host->mmc)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) !sdhci_enable_irq_wakeups(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) host->ier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) sdhci_writel(host, 0, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) free_irq(host->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) EXPORT_SYMBOL_GPL(sdhci_suspend_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) int sdhci_resume_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) if (host->ops->enable_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) host->ops->enable_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) /* Card keeps power but host controller does not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) sdhci_init(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) host->pwr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) host->clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) mmc->ops->set_ios(mmc, &mmc->ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) if (host->irq_wake_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) sdhci_disable_irq_wakeups(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) ret = request_threaded_irq(host->irq, sdhci_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) sdhci_thread_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) mmc_hostname(host->mmc), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) sdhci_enable_card_detection(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) EXPORT_SYMBOL_GPL(sdhci_resume_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) int sdhci_runtime_suspend_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) mmc_retune_timer_stop(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) host->ier &= SDHCI_INT_CARD_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) synchronize_hardirq(host->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) host->runtime_suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) int host_flags = host->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) if (host->ops->enable_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) host->ops->enable_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) sdhci_init(host, soft_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) mmc->ios.power_mode != MMC_POWER_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) /* Force clock and power re-program */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) host->pwr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) host->clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) mmc->ops->set_ios(mmc, &mmc->ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) if ((host_flags & SDHCI_PV_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) sdhci_enable_preset_value(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) mmc->ops->hs400_enhanced_strobe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) host->runtime_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) /* Enable SDIO IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) if (sdio_irq_claimed(mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) sdhci_enable_sdio_irq_nolock(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) /* Enable Card Detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) sdhci_enable_card_detection(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) * Command Queue Engine (CQE) helpers *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) void sdhci_cqe_enable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) ctrl &= ~SDHCI_CTRL_DMA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) * Host from V4.10 supports ADMA3 DMA type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) * ADMA3 performs integrated descriptor which is more suitable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) * for cmd queuing to fetch both command and transfer descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) ctrl |= SDHCI_CTRL_ADMA3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) else if (host->flags & SDHCI_USE_64_BIT_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) ctrl |= SDHCI_CTRL_ADMA64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) ctrl |= SDHCI_CTRL_ADMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) SDHCI_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) /* Set maximum timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) sdhci_set_timeout(host, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) host->ier = host->cqe_ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) host->cqe_on = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) mmc_hostname(mmc), host->ier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) sdhci_readl(host, SDHCI_INT_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) sdhci_set_default_irqs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) host->cqe_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) if (recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) sdhci_do_reset(host, SDHCI_RESET_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) sdhci_do_reset(host, SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) mmc_hostname(mmc), host->ier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) sdhci_readl(host, SDHCI_INT_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) int *data_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) if (!host->cqe_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) *cmd_error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) else if (intmask & SDHCI_INT_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) *cmd_error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) *cmd_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) *data_error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) else if (intmask & SDHCI_INT_DATA_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) *data_error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) else if (intmask & SDHCI_INT_ADMA_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) *data_error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) *data_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) /* Clear selected interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) mask = intmask & host->cqe_ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) sdhci_writel(host, mask, SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) if (intmask & SDHCI_INT_BUS_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) pr_err("%s: Card is consuming too much power!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) if (intmask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) sdhci_writel(host, intmask, SDHCI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) mmc_hostname(host->mmc), intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) sdhci_dumpregs(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) * Device allocation/registration *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) struct sdhci_host *sdhci_alloc_host(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) size_t priv_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) struct sdhci_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) WARN_ON(dev == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) host->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) host->mmc_host_ops = sdhci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) mmc->ops = &host->mmc_host_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) host->flags = SDHCI_SIGNALING_330;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) host->cqe_ier = SDHCI_CQE_INT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) host->tuning_delay = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) host->tuning_loop_count = MAX_TUNING_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) * The DMA table descriptor count is calculated as the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) * number of segments times 2, to allow for an alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) * descriptor for each segment, plus 1 for a nop end descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) return host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) EXPORT_SYMBOL_GPL(sdhci_alloc_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) static int sdhci_set_dma_mask(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) struct device *dev = mmc_dev(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) host->flags &= ~SDHCI_USE_64_BIT_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) /* Try 64-bit mask if hardware is capable of it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) if (host->flags & SDHCI_USE_64_BIT_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) pr_warn("%s: Failed to set 64-bit DMA mask.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) host->flags &= ~SDHCI_USE_64_BIT_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) /* 32-bit mask as default & fallback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) pr_warn("%s: Failed to set 32-bit DMA mask.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) const u32 *caps, const u32 *caps1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) u16 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) u64 dt_caps_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) u64 dt_caps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (host->read_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) host->read_caps = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) if (debug_quirks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) host->quirks = debug_quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) if (debug_quirks2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) host->quirks2 = debug_quirks2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) sdhci_do_reset(host, SDHCI_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) if (host->v4_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) sdhci_do_enable_v4_mode(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) device_property_read_u64_array(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) "sdhci-caps-mask", &dt_caps_mask, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) device_property_read_u64_array(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) "sdhci-caps", &dt_caps, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) if (caps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) host->caps = *caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) host->caps &= ~lower_32_bits(dt_caps_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) host->caps |= lower_32_bits(dt_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) if (host->version < SDHCI_SPEC_300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) if (caps1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) host->caps1 = *caps1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) host->caps1 &= ~upper_32_bits(dt_caps_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) host->caps1 |= upper_32_bits(dt_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) EXPORT_SYMBOL_GPL(__sdhci_read_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) unsigned int max_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) unsigned int bounce_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) * has diminishing returns, this is probably because SD/MMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) * cards are usually optimized to handle this size of requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) bounce_size = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) * Adjust downwards to maximum request size if this is less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) * than our segment size, else hammer down the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) * request size to the maximum buffer size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (mmc->max_req_size < bounce_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) bounce_size = mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) max_blocks = bounce_size / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) * When we just support one segment, we can get significant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) * speedups by the help of a bounce buffer to group scattered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) * reads/writes together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) host->bounce_buffer = devm_kmalloc(mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) bounce_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) if (!host->bounce_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) mmc_hostname(mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) bounce_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) * Exiting with zero here makes sure we proceed with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) * mmc->max_segs == 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) host->bounce_addr = dma_map_single(mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) host->bounce_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) bounce_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) ret = dma_mapping_error(mmc->parent, host->bounce_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) /* Again fall back to max_segs == 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) host->bounce_buffer_size = bounce_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) /* Lie about this since we're bouncing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) mmc->max_segs = max_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) mmc->max_seg_size = bounce_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) mmc->max_req_size = bounce_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) mmc_hostname(mmc), max_blocks, bounce_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) * According to SD Host Controller spec v4.10, bit[27] added from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) * version 4.10 in Capabilities Register is used as 64-bit System
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) * Address support for V4 mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) return host->caps & SDHCI_CAN_64BIT_V4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) return host->caps & SDHCI_CAN_64BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) int sdhci_setup_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) u32 max_current_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) unsigned int ocr_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) unsigned int override_timeout_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) u32 max_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) bool enable_vqmmc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) WARN_ON(host == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) if (host == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) * If there are external regulators, get them. Note this must be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) * early before resetting the host and reading the capabilities so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) * the host can take the appropriate action if regulators are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) if (!mmc->supply.vqmmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) ret = mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) enable_vqmmc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) DBG("Version: 0x%08x | Present: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) sdhci_readw(host, SDHCI_HOST_VERSION),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) sdhci_readl(host, SDHCI_PRESENT_STATE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) sdhci_readl(host, SDHCI_CAPABILITIES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) sdhci_readl(host, SDHCI_CAPABILITIES_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) sdhci_read_caps(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) override_timeout_clk = host->timeout_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) if (host->version > SDHCI_SPEC_420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) mmc_hostname(mmc), host->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) host->flags |= SDHCI_USE_SDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) else if (!(host->caps & SDHCI_CAN_DO_SDMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) DBG("Controller doesn't have SDMA capability\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) host->flags |= SDHCI_USE_SDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) (host->flags & SDHCI_USE_SDMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) DBG("Disabling DMA as it is marked broken\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) host->flags &= ~SDHCI_USE_SDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) if ((host->version >= SDHCI_SPEC_200) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) (host->caps & SDHCI_CAN_DO_ADMA2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) host->flags |= SDHCI_USE_ADMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) (host->flags & SDHCI_USE_ADMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) DBG("Disabling ADMA as it is marked broken\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) host->flags &= ~SDHCI_USE_ADMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) if (sdhci_can_64bit_dma(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) host->flags |= SDHCI_USE_64_BIT_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) if (host->use_external_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) ret = sdhci_external_dma_init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) goto unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) * Fall back to use the DMA/PIO integrated in standard SDHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) * instead of external DMA devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) else if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) sdhci_switch_external_dma(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) /* Disable internal DMA sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if (host->ops->set_dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) ret = host->ops->set_dma_mask(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) ret = sdhci_set_dma_mask(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (!ret && host->ops->enable_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) ret = host->ops->enable_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) pr_warn("%s: No suitable DMA available - falling back to PIO\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) /* SDMA does not support 64-bit DMA if v4 mode not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) host->flags &= ~SDHCI_USE_SDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) if (!(host->flags & SDHCI_USE_64_BIT_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) else if (!host->alloc_desc_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) host->desc_sz = host->alloc_desc_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) * Use zalloc to zero the reserved high 32-bits of 128-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) * descriptors so that they never need to be written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) buf = dma_alloc_coherent(mmc_dev(mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) host->align_buffer_sz + host->adma_table_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) &dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) host->flags &= ~SDHCI_USE_ADMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) } else if ((dma + host->align_buffer_sz) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) (SDHCI_ADMA2_DESC_ALIGN - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) host->flags &= ~SDHCI_USE_ADMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) host->adma_table_sz, buf, dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) host->align_buffer = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) host->align_addr = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) host->adma_table = buf + host->align_buffer_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) host->adma_addr = dma + host->align_buffer_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) * If we use DMA, then it's up to the caller to set the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) * mask, but PIO does not need the hw shim so we set a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) * mask here in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) host->dma_mask = DMA_BIT_MASK(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) mmc_dev(mmc)->dma_mask = &host->dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) if (host->version >= SDHCI_SPEC_300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) host->max_clk *= 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) if (host->max_clk == 0 || host->quirks &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) if (!host->ops->get_max_clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) pr_err("%s: Hardware doesn't specify base clock frequency.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) goto undma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) host->max_clk = host->ops->get_max_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) * In case of Host Controller v3.00, find out whether clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) * multiplier is supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) * In case the value in Clock Multiplier is 0, then programmable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) * clock mode is not supported, otherwise the actual clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) * multiplier is one more than the value of Clock Multiplier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) * in the Capabilities Register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) if (host->clk_mul)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) host->clk_mul += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) * Set host parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) max_clk = host->max_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) if (host->ops->get_min_clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) mmc->f_min = host->ops->get_min_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) else if (host->version >= SDHCI_SPEC_300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) if (host->clk_mul)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) max_clk = host->max_clk * host->clk_mul;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) * Divided Clock Mode minimum clock rate is always less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) * Programmable Clock Mode minimum clock rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) if (!mmc->f_max || mmc->f_max > max_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) mmc->f_max = max_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) host->timeout_clk *= 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (host->timeout_clk == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) if (!host->ops->get_timeout_clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) goto undma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) host->timeout_clk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) DIV_ROUND_UP(host->ops->get_timeout_clock(host),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) if (override_timeout_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) host->timeout_clk = override_timeout_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) host->ops->get_max_timeout_count(host) : 1 << 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) mmc->max_busy_timeout /= host->timeout_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) !host->ops->get_max_timeout_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) mmc->max_busy_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) host->flags |= SDHCI_AUTO_CMD12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) * For v4 mode, SDMA may use Auto-CMD23 as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) if ((host->version >= SDHCI_SPEC_300) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) ((host->flags & SDHCI_USE_ADMA) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) host->flags |= SDHCI_AUTO_CMD23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) DBG("Auto-CMD23 available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) DBG("Auto-CMD23 unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) * A controller may support 8-bit width, but the board itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) * might not have the pins brought out. Boards that support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) * their platform code before calling sdhci_add_host(), and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * won't assume 8-bit width for hosts without that CAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) mmc->caps |= MMC_CAP_4_BIT_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) mmc->caps &= ~MMC_CAP_CMD23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) if (host->caps & SDHCI_CAN_DO_HISPD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) mmc_card_is_removable(mmc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) mmc_gpio_get_cd(host->mmc) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) mmc->caps |= MMC_CAP_NEEDS_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (enable_vqmmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) ret = regulator_enable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) host->sdhci_core_to_disable_vqmmc = !ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) /* If vqmmc provides no 1.8V signalling, then there's no UHS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 1950000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) SDHCI_SUPPORT_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) SDHCI_SUPPORT_DDR50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) /* In eMMC case vqmmc might be a fixed 1.8V regulator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 3600000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) host->flags &= ~SDHCI_SIGNALING_330;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) mmc_hostname(mmc), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) mmc->supply.vqmmc = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) SDHCI_SUPPORT_DDR50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) * The SDHCI controller in a SoC might support HS200/HS400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) * but if the board is modeled such that the IO lines are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) * connected to 1.8v then HS200/HS400 cannot be supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) * Disable HS200/HS400 if the board does not have 1.8v connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) * to the IO lines. (Applicable for other modes in 1.8v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) SDHCI_SUPPORT_DDR50))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) /* SDR104 supports also implies SDR50 support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) if (host->caps1 & SDHCI_SUPPORT_SDR104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) /* SD3.0: SDR104 is supported so (for eMMC) the caps2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) * field can be promoted to support HS200.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) mmc->caps2 |= MMC_CAP2_HS200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) mmc->caps |= MMC_CAP_UHS_SDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) (host->caps1 & SDHCI_SUPPORT_HS400))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) mmc->caps2 |= MMC_CAP2_HS400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) (IS_ERR(mmc->supply.vqmmc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 1300000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) mmc->caps |= MMC_CAP_UHS_DDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) /* Does the host need tuning for SDR50? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) if (host->caps1 & SDHCI_USE_SDR50_TUNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) host->flags |= SDHCI_SDR50_NEEDS_TUNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) /* Driver Type(s) (A, C, D) supported by the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) if (host->caps1 & SDHCI_DRIVER_TYPE_A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) if (host->caps1 & SDHCI_DRIVER_TYPE_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) if (host->caps1 & SDHCI_DRIVER_TYPE_D)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) /* Initial value for re-tuning timer count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) host->caps1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) * In case Re-tuning Timer is not disabled, the actual value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) * re-tuning timer will be 2 ^ (n - 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) if (host->tuning_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) host->tuning_count = 1 << (host->tuning_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) /* Re-tuning mode supported by the Host Controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) ocr_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) * According to SD Host Controller spec v3.00, if the Host System
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) * can afford more than 150mA, Host Driver should set XPC to 1. Also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) * the value is meaningful only if Voltage Support in the Capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) * register is set. The actual current value is 4 times the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) * value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) int curr = regulator_get_current_limit(mmc->supply.vmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) if (curr > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) /* convert to SDHCI_MAX_CURRENT format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) curr = curr/1000; /* convert to mA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) max_current_caps =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) if (host->caps & SDHCI_CAN_VDD_330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) max_current_caps) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) SDHCI_MAX_CURRENT_MULTIPLIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) if (host->caps & SDHCI_CAN_VDD_300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) max_current_caps) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) SDHCI_MAX_CURRENT_MULTIPLIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) if (host->caps & SDHCI_CAN_VDD_180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) ocr_avail |= MMC_VDD_165_195;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) max_current_caps) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) SDHCI_MAX_CURRENT_MULTIPLIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) /* If OCR set by host, use it instead. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) if (host->ocr_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) ocr_avail = host->ocr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) /* If OCR set by external regulators, give it highest prio. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) if (mmc->ocr_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) ocr_avail = mmc->ocr_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) mmc->ocr_avail = ocr_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) mmc->ocr_avail_sdio = ocr_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) if (host->ocr_avail_sdio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) mmc->ocr_avail_sd = ocr_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) if (host->ocr_avail_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) mmc->ocr_avail_sd &= host->ocr_avail_sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) else /* normal SD controllers don't support 1.8V */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) mmc->ocr_avail_mmc = ocr_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) if (host->ocr_avail_mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) if (mmc->ocr_avail == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) pr_err("%s: Hardware doesn't report any support voltages.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) goto unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) host->flags |= SDHCI_SIGNALING_180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) host->flags |= SDHCI_SIGNALING_120;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) * Maximum number of sectors in one transfer. Limited by SDMA boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) * is less anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) mmc->max_req_size = 524288;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) * Maximum number of segments. Depends on if the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) * can do scatter/gather or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) mmc->max_segs = SDHCI_MAX_SEGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) } else if (host->flags & SDHCI_USE_SDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) mmc->max_segs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) if (swiotlb_max_segment()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) IO_TLB_SEGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) mmc->max_req_size = min(mmc->max_req_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) max_req_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) } else { /* PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) mmc->max_segs = SDHCI_MAX_SEGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) * Maximum segment size. Could be one segment with the maximum number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) * of bytes. When doing hardware scatter/gather, each entry cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) * be larger than 64 KiB though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) if (host->flags & SDHCI_USE_ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) mmc->max_seg_size = 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) mmc->max_seg_size = 65536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) mmc->max_seg_size = mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) * Maximum block size. This varies from controller to controller and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) * is specified in the capabilities register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) mmc->max_blk_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) SDHCI_MAX_BLOCK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) if (mmc->max_blk_size >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) mmc->max_blk_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) mmc->max_blk_size = 512 << mmc->max_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) * Maximum block count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) if (mmc->max_segs == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) /* This may alter mmc->*_blk_* parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) sdhci_allocate_bounce_buffer(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) if (host->sdhci_core_to_disable_vqmmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) regulator_disable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) undma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) if (host->align_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) host->adma_table_sz, host->align_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) host->align_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) host->adma_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) host->align_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) EXPORT_SYMBOL_GPL(sdhci_setup_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) void sdhci_cleanup_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) if (host->sdhci_core_to_disable_vqmmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) regulator_disable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) if (host->align_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) host->adma_table_sz, host->align_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) host->align_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) if (host->use_external_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) sdhci_external_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) host->adma_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) host->align_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) int __sdhci_add_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) if ((mmc->caps2 & MMC_CAP2_CQE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) mmc->caps2 &= ~MMC_CAP2_CQE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) mmc->cqe_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) host->complete_wq = alloc_workqueue("sdhci", flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) if (!host->complete_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) INIT_WORK(&host->complete_work, sdhci_complete_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) timer_setup(&host->timer, sdhci_timeout_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) init_waitqueue_head(&host->buf_ready_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) sdhci_init(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) IRQF_SHARED, mmc_hostname(mmc), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) pr_err("%s: Failed to request IRQ %d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) mmc_hostname(mmc), host->irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) goto unwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) ret = sdhci_led_register(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) pr_err("%s: Failed to register LED device: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) mmc_hostname(mmc), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) goto unirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) ret = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) goto unled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) pr_info("%s: SDHCI controller on %s [%s] using %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) host->use_external_dma ? "External DMA" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) (host->flags & SDHCI_USE_ADMA) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) sdhci_enable_card_detection(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) unled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) sdhci_led_unregister(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) unirq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) sdhci_do_reset(host, SDHCI_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) sdhci_writel(host, 0, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) free_irq(host->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) unwq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) destroy_workqueue(host->complete_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) EXPORT_SYMBOL_GPL(__sdhci_add_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) int sdhci_add_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) ret = sdhci_setup_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) ret = __sdhci_add_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) sdhci_cleanup_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) EXPORT_SYMBOL_GPL(sdhci_add_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) void sdhci_remove_host(struct sdhci_host *host, int dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) if (dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) host->flags |= SDHCI_DEVICE_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) if (sdhci_has_requests(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) pr_err("%s: Controller removed during "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) " transfer!\n", mmc_hostname(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) sdhci_error_out_mrqs(host, -ENOMEDIUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) sdhci_disable_card_detection(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) mmc_remove_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) sdhci_led_unregister(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) if (!dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) sdhci_do_reset(host, SDHCI_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) sdhci_writel(host, 0, SDHCI_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) free_irq(host->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) del_timer_sync(&host->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) del_timer_sync(&host->data_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) destroy_workqueue(host->complete_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) if (host->sdhci_core_to_disable_vqmmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) regulator_disable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) if (host->align_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) host->adma_table_sz, host->align_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) host->align_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) if (host->use_external_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) sdhci_external_dma_release(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) host->adma_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) host->align_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) EXPORT_SYMBOL_GPL(sdhci_remove_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) void sdhci_free_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) mmc_free_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) EXPORT_SYMBOL_GPL(sdhci_free_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) /*****************************************************************************\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) * Driver init/exit *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) \*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) static int __init sdhci_drv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) pr_info(DRIVER_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) ": Secure Digital Host Controller Interface driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) static void __exit sdhci_drv_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) module_init(sdhci_drv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) module_exit(sdhci_drv_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) module_param(debug_quirks, uint, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) module_param(debug_quirks2, uint, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");