// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCI driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "hsu.h"

/* Global registers of the DMA controller, as offsets within BAR 0 */
#define HSU_PCI_DMASR		0x00
#define HSU_PCI_DMAISR		0x04

/* Offset of the first channel's register block within BAR 0 */
#define HSU_PCI_CHAN_OFFSET	0x100

#define PCI_DEVICE_ID_INTEL_MFLD_HSU_DMA	0x081e
#define PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA	0x1192

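/*
 * DMAISR carries one pending bit per channel. For every bit that is set,
 * read the channel status and dispatch it to the HSU DMA core; report the
 * IRQ as handled if any channel required attention.
 */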
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
	struct hsu_dma_chip *chip = dev;
	u32 dmaisr;
	u32 status;
	unsigned short i;
	int ret = 0;
	int err;

	dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
	for (i = 0; i < chip->hsu->nr_channels; i++) {
		if (dmaisr & 0x1) {
			err = hsu_dma_get_status(chip, i, &status);
			if (err > 0)
				ret |= 1;
			else if (err == 0)
				ret |= hsu_dma_do_irq(chip, i, status);
		}
		dmaisr >>= 1;
	}

	return IRQ_RETVAL(ret);
}

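/*
 * Enable the device, map BAR 0, set up 32-bit DMA addressing, allocate a
 * single interrupt vector, register the controller with the HSU DMA core
 * and hook up the interrupt handler.
 */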
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hsu_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Use 32-bit DMA addressing for both streaming and coherent mappings */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->length = pci_resource_len(pdev, 0);
	chip->offset = HSU_PCI_CHAN_OFFSET;
	chip->irq = pci_irq_vector(pdev, 0);

	ret = hsu_dma_probe(chip);
	if (ret)
		return ret;

	ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
	if (ret)
		goto err_register_irq;

	/*
	 * On Intel Tangier B0 and Anniedale the interrupt line, despite
	 * having different numbers, is shared between the HSU DMA and UART
	 * IPs. Thus on such SoCs the IRQ is expected to be handled by the
	 * UART driver only. Instead of handling the spurious interrupt
	 * from HSU DMA here, wasting CPU time and delaying the HSU UART
	 * interrupt handling, disable the interrupt entirely.
	 */
	if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
		disable_irq_nosync(chip->irq);

	pci_set_drvdata(pdev, chip);

	return 0;

err_register_irq:
	hsu_dma_remove(chip);
	return ret;
}

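/*
 * Tear down in reverse order: release the IRQ first so the handler cannot
 * run against a removed DMA controller, then unregister from the HSU DMA
 * core. The remaining resources were acquired with device-managed
 * (devm/pcim) helpers and are released automatically.
 */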
static void hsu_pci_remove(struct pci_dev *pdev)
{
	struct hsu_dma_chip *chip = pci_get_drvdata(pdev);

	free_irq(chip->irq, chip);
	hsu_dma_remove(chip);
}

static const struct pci_device_id hsu_pci_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MFLD_HSU_DMA), 0 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA), 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, hsu_pci_id_table);

static struct pci_driver hsu_pci_driver = {
	.name		= "hsu_dma_pci",
	.id_table	= hsu_pci_id_table,
	.probe		= hsu_pci_probe,
	.remove		= hsu_pci_remove,
};

module_pci_driver(hsu_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA PCI driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");