/*
 * Driver for MMC and SSD cards for Cavium ThunderX SoCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include "cavium.h"

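/*
 * All slots on a host share one external bus; the mmc_serializer
 * semaphore ensures only one slot uses the bus at a time.
 */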
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}

static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}

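/*
 * Acknowledge the requested interrupt bits in MIO_EMM_INT (writing 1
 * clears them, see the note in probe below), then enable them through
 * the set-register MIO_EMM_INT_EN_SET.
 */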
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}

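/*
 * The controller exposes up to nine MSI-X vectors (one per source
 * named in cvm_mmc_irq_names[]); hook each one up to the common
 * cvm_mmc_interrupt() handler. devm_request_irq() ties the lifetime
 * of the handlers to the device.
 */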
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}

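/*
 * Probe: map BAR0, enable the eMMC input clock, fill in the host state
 * shared with the cavium.c core, then create one child platform device
 * per "mmc-slot" DT node so each slot gets its own struct device.
 */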
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base) {
		ret = -EINVAL;
		goto error;
	}

	/* On ThunderX these are identical */
	host->dma_base = host->base;

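	/* Offsets of the eMMC and DMA register blocks within BAR0 */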
	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto error;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto error;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

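	/*
	 * Capability flags consumed by the shared cavium.c core:
	 * scatter-gather DMA, wide DMA addresses, and interrupt
	 * handling that requires irq_handler_lock.
	 */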
	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

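	/* The DMA engine generates up to 48-bit addresses */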
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from the
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse() and the devm* helpers require one device
		 * per slot. Create a dummy platform device per slot, with
		 * its of_node pointing at the slot's node; the easiest way
		 * to get this is of_platform_device_create().
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret)
				goto error;
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i]) {
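			/*
			 * of_platform_device_destroy() drops the device's
			 * own reference; hold an extra one across the call
			 * so the final put_device() below is what frees it.
			 */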
			get_device(&host->slot_pdev[i]->dev);
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
			put_device(&host->slot_pdev[i]->dev);
		}
	}
	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
	return ret;
}

static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

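	/* Disable the DMA engine before the clock is shut off */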
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}

static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};

static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};

module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);