// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

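/* Per device-type ID allocators for probed devices, protected by idxd_idr_lock. */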
static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

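/*
 * Enable MSI-X and request one vector per interrupt entry: vector 0 is
 * serviced by the "misc" handler (device errors and other non-I/O events),
 * while the remaining vectors service work queue completions. Error
 * interrupt generation is only unmasked once all handlers are in place.
 */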
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kcalloc(dev, msixcnt,
					  sizeof(struct msix_entry), GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement one completion list per MSI-X entry, except for
	 * entry 0, which is reserved for errors and other miscellaneous
	 * device events.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

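/*
 * Allocate and initialize the group, work queue and engine descriptors
 * according to the limits read from the capability registers, and create
 * the per-device workqueue used for deferred work.
 */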
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
		if (!wq->wqcfg)
			return -ENOMEM;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}

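/*
 * The table offset register describes where the group, work queue, MSI-X
 * permission and perfmon configuration tables live; the hardware reports
 * them in units of 0x100 bytes from the start of the MMIO region.
 */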
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

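/*
 * Cache the device capabilities: general limits (max transfer and batch
 * sizes, configurability), group, engine and work queue counts, and the
 * supported operation bitmap.
 */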
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

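/* Allocate the per-device context and record the mapped MMIO BAR. */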
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

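/*
 * Device-level probe: reset the device, read its capabilities and table
 * offsets, set up the internal structures and interrupts, then allocate
 * an ID and the char device major for the new instance.
 */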
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
err_setup:
	return rc;
}

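/*
 * PCI probe path: enable the device, map the MMIO BAR, configure the DMA
 * masks, allocate and probe the device context, then register the sysfs
 * interface and mark the device ready for configuration.
 */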
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

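/*
 * On shutdown, descriptors still sitting on a vector's pending llist or
 * work list are completed with an abort status and returned to their
 * work queue.
 */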
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

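/*
 * Quiesce the device: disable it, mask its interrupts, wait for in-flight
 * handlers to finish, flush any outstanding descriptors and tear down the
 * per-device workqueue.
 */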
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}

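/* Remove mirrors probe: tear down sysfs, shut the device down and release its ID. */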
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name = DRV_NAME,
	.id_table = idxd_pci_tbl,
	.probe = idxd_pci_probe,
	.remove = idxd_remove,
	.shutdown = idxd_shutdown,
};

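/*
 * Module init registers, in order, the idxd bus type, the device drivers
 * on that bus, the char device support and finally the PCI driver;
 * failures are unwound in reverse order.
 */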
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support 512-bit writes (MOVDIR64B), there is
	 * no point in enumerating the device; we cannot make use of it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_unregister_driver();
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);