// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static const struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	/* I/OAT v3.4 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of the DCA service (default: 1)");
int ioat_pending_level = 7;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 7)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

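/*
 * Device ID classification helpers: each switch below matches pdev->device
 * against one platform family from the PCI ID table above (JSF, SNB, IVB,
 * HSW, BDX, SKX, BWD and BDX-DE). is_xeon_cb32() and is_bwd_ioat() aggregate
 * those checks for the platform-specific quirks applied elsewhere in the
 * driver.
 */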
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static inline bool is_skx_ioat(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX;
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
	       is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has the same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an I/OAT transaction to verify that the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

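	/*
	 * The self-test fills a source buffer with a known pattern, DMA-copies
	 * it through the device's first channel with an interrupt callback,
	 * waits up to 3 seconds for completion and then memcmp()s the source
	 * against the destination.
	 */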
	src = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

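	/*
	 * Interrupt setup degrades gracefully: MSI-X is tried first, then
	 * MSI, then legacy INTx. The ioat_interrupt_style module parameter
	 * only selects where in that chain to start.
	 */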
	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

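	/*
	 * Probe order: create the completion writeback pool, enumerate the
	 * hardware channels, advertise DMA_MEMCPY, set up interrupts and
	 * finish with a self-test; each failure unwinds the earlier steps.
	 */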
	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

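	/*
	 * The channel count and the log2 of the per-descriptor transfer limit
	 * come from the CHANCNT and XFERCAP registers; only bits [4:0] of
	 * each are valid. Channels are then allocated and reset one by one,
	 * and chancnt is trimmed to the number that initialized successfully.
	 */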
	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources, first check whether they
	 * have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);

	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
		ioat_reset_hw(ioat_chan);

		/* Put LTR to idle */
		if (ioat_dma->version >= IOAT_VER_3_4)
			writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
			       ioat_chan->reg_base +
			       IOAT_CHAN_LTR_SWSEL_OFFSET);
	}

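	/*
	 * With both channel locks held, free the idle descriptors between
	 * head and the current ring space first, then any entries still
	 * outstanding from tail, and finally release the descriptor chunks
	 * and the completion writeback area.
	 */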
	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @c: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

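	/*
	 * Per the dmaengine convention, this callback returns the number of
	 * descriptors made available to the core on success, i.e.
	 * 1 << alloc_order, both in the early return above and at the end of
	 * the function.
	 */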
	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
				GFP_NOWAIT, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	/* Setting up LTR values for 3.4 or later */
	if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
		u32 lat_val;

		lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
			  IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
			  IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
		writel(lat_val, ioat_chan->reg_base +
				IOAT_CHAN_LTR_ACTIVE_OFFSET);

		lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
			  IOAT_CHAN_LTR_IDLE_SNLATSCALE |
			  IOAT_CHAN_LTR_IDLE_SNREQMNT;
		writel(lat_val, ioat_chan->reg_base +
				IOAT_CHAN_LTR_IDLE_OFFSET);

		/* Select to active */
		writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
		       ioat_chan->reg_base +
		       IOAT_CHAN_LTR_SWSEL_OFFSET);
	}

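	/*
	 * Kick the channel with a NULL descriptor; the poll loop below gives
	 * the engine roughly 20 usec to report itself active or idle.
	 */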
	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

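	/*
	 * Per-channel registers are laid out at a 0x80-byte stride behind the
	 * common device registers, hence reg_base + 0x80 * (idx + 1) below.
	 */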
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ioat_chan->ioat_dma = ioat_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) spin_lock_init(&ioat_chan->cleanup_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) ioat_chan->dma_chan.device = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) dma_cookie_init(&ioat_chan->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ioat_dma->idx[idx] = ioat_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) tasklet_setup(&ioat_chan->cleanup_task, ioat_cleanup_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
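/*
 * Self-test for the XOR and XOR-validate engines: fill each source page
 * with a distinct single-bit pattern, XOR the sources into a zeroed
 * destination and compare against the expected pattern, then (if
 * DMA_XOR_VAL is available) run one validate pass that must report a zero
 * parity sum and a second pass against a zeroed destination that must
 * flag a parity mismatch. Returns 0 on success or a negative errno.
 */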
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int i, src_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct page *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct page *xor_srcs[IOAT_NUM_SRC_TEST];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dma_addr_t dest_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct dma_chan *dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) u8 cmp_byte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) u32 cmp_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) u32 xor_val_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct completion cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) unsigned long tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct device *dev = &ioat_dma->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct dma_device *dma = &ioat_dma->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) u8 op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) dev_dbg(dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (!dma_has_cap(DMA_XOR, dma->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!xor_srcs[src_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) while (src_idx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) __free_page(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) dest = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (!dest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) while (src_idx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) __free_page(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /* Fill in src buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u8 *ptr = page_address(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < PAGE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ptr[i] = (1 << src_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cmp_byte ^= (u8) (1 << src_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) (cmp_byte << 8) | cmp_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) memset(page_address(dest), 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dma_chan = container_of(dma->channels.next, struct dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (dma->device_alloc_chan_resources(dma_chan) < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* test xor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) op = IOAT_OP_XOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (dma_mapping_error(dev, dest_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (dma_mapping_error(dev, dma_srcs[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) IOAT_NUM_SRC_TEST, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) dev_err(dev, "Self-test xor prep failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) async_tx_ack(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) init_completion(&cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) tx->callback = ioat_dma_test_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) tx->callback_param = &cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) cookie = tx->tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (cookie < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev_err(dev, "Self-test xor setup failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) dma->device_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (tmo == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dev_err(dev, "Self-test xor timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) u32 *ptr = page_address(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ptr[i] != cmp_word) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) dev_err(dev, "Self-test xor failed compare\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* skip validate if the capability is not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) op = IOAT_OP_XOR_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* validate the sources with the destination page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) xor_val_srcs[i] = xor_srcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) xor_val_srcs[i] = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) xor_val_result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (dma_mapping_error(dev, dma_srcs[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) &xor_val_result, DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (!tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev_err(dev, "Self-test zero prep failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) async_tx_ack(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) init_completion(&cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) tx->callback = ioat_dma_test_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) tx->callback_param = &cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) cookie = tx->tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (cookie < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dev_err(dev, "Self-test zero setup failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dma->device_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (tmo == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dev_err(dev, "Self-test validate timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (xor_val_result != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dev_err(dev, "Self-test validate failed compare\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) memset(page_address(dest), 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* test that a non-zero parity sum is detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) op = IOAT_OP_XOR_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) xor_val_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (dma_mapping_error(dev, dma_srcs[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) &xor_val_result, DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (!tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dev_err(dev, "Self-test 2nd zero prep failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) async_tx_ack(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) init_completion(&cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) tx->callback = ioat_dma_test_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) tx->callback_param = &cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) cookie = tx->tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (cookie < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dev_err(dev, "Self-test 2nd zero setup failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dma->device_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (tmo == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) dev_err(dev, "Self-test 2nd validate timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (xor_val_result != SUM_CHECK_P_RESULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) dev_err(dev, "Self-test validate failed compare\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) goto dma_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dma_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (op == IOAT_OP_XOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) } else if (op == IOAT_OP_XOR_VAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) free_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dma->device_free_chan_resources(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) src_idx = IOAT_NUM_SRC_TEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) while (src_idx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) __free_page(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) __free_page(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
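/*
 * Composite self-test: run the base DMA self-test first, then the
 * XOR/XOR-validate self-test above.
 */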
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) rc = ioat_dma_self_test(ioat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) rc = ioat_xor_val_self_test(ioat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
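/*
 * Interrupt quirk: on hardware that supports descriptor write back error
 * status (IOAT_CAP_DWBES), XOR P/CRC and Q error conditions are reported
 * through the descriptor, so mask the corresponding error interrupts on
 * each channel.
 */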
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct dma_device *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct dma_chan *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct ioatdma_chan *ioat_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) u32 errmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) dma = &ioat_dma->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * if we have descriptor write back error status, we mask the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * error interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (ioat_dma->cap & IOAT_CAP_DWBES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) list_for_each_entry(c, &dma->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ioat_chan = to_ioat_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) errmask = readl(ioat_chan->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) IOAT_CHANERR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) IOAT_CHANERR_XOR_Q_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) writel(errmask, ioat_chan->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) IOAT_CHANERR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
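/*
 * Probe an I/OAT v3.x device: read the capability register, wire up the
 * memcpy/XOR/PQ operations the hardware supports (RAID capabilities are
 * dropped on Xeon CB32 and BWD no-RAID parts, and when DCA is enabled),
 * allocate the super extended descriptor pools where RAID16SS is present,
 * register the dmaengine device, optionally set up DCA, and disable PCIe
 * relaxed ordering for the device.
 */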
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct pci_dev *pdev = ioat_dma->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int dca_en = system_has_dca_enabled(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct dma_device *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct dma_chan *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct ioatdma_chan *ioat_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u16 val16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) dma = &ioat_dma->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dma->device_issue_pending = ioat_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) dma->device_free_chan_resources = ioat_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ioat_dma->cap &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* dca is incompatible with raid operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (ioat_dma->cap & IOAT_CAP_XOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) dma->max_xor = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) dma_cap_set(DMA_XOR, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dma->device_prep_dma_xor = ioat_prep_xor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) dma->device_prep_dma_xor_val = ioat_prep_xor_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (ioat_dma->cap & IOAT_CAP_PQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dma->device_prep_dma_pq = ioat_prep_pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dma->device_prep_dma_pq_val = ioat_prep_pq_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dma_cap_set(DMA_PQ, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (ioat_dma->cap & IOAT_CAP_RAID16SS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) dma_set_maxpq(dma, 16, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) dma_set_maxpq(dma, 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) dma->device_prep_dma_xor = ioat_prep_pqxor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dma_cap_set(DMA_XOR, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (ioat_dma->cap & IOAT_CAP_RAID16SS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dma->max_xor = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) dma->max_xor = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) dma->device_tx_status = ioat_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* starting with CB3.3 super extended descriptors are supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) char pool_name[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) for (i = 0; i < MAX_SED_POOLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) snprintf(pool_name, 14, "ioat_hw%d_sed", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* allocate SED DMA pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) SED_SIZE * (i + 1), 64, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!ioat_dma->sed_hw_pool[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) dma_cap_set(DMA_PRIVATE, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) err = ioat_probe(ioat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) list_for_each_entry(c, &dma->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ioat_chan = to_ioat_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) writel(IOAT_DMA_DCA_ANY_CPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) err = ioat_register(ioat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ioat_kobject_add(ioat_dma, &ioat_ktype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (dca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* disable relaxed ordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return pcibios_err_to_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* clear relaxed ordering enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) val16 &= ~IOAT_DEVCTRL_ROE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return pcibios_err_to_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (ioat_dma->cap & IOAT_CAP_DPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) writeb(ioat_pending_level + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
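/*
 * Quiesce the device for shutdown: mark every channel down so no new
 * descriptors are prepared, stop the per-channel timers, reset the
 * hardware channels and disable interrupts.
 */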
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void ioat_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct ioatdma_chan *ioat_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (!ioat_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) for (i = 0; i < IOAT_MAX_CHANS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ioat_chan = ioat_dma->idx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (!ioat_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * Synchronization rule for del_timer_sync():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * - The caller must not hold locks which would prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * completion of the timer's handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * So prep_lock cannot be held before calling it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) del_timer_sync(&ioat_chan->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* this should quiesce then reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ioat_reset_hw(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) ioat_disable_interrupts(ioat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
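/*
 * Bring the channels back after an AER-driven shutdown: clear the
 * IOAT_CHAN_DOWN state and acknowledge any latched channel errors. No
 * hardware reset is needed here since ioat_shutdown() already did one.
 */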
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void ioat_resume(struct ioatdma_device *ioat_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct ioatdma_chan *ioat_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) u32 chanerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) for (i = 0; i < IOAT_MAX_CHANS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ioat_chan = ioat_dma->idx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!ioat_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* no need to reset as shutdown already did that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #define DRV_NAME "ioatdma"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
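/*
 * PCIe AER callbacks: on a detected error the device is quiesced and a
 * slot reset is requested; after the reset the device is re-enabled and
 * its state restored, and the resume callback brings the channels back up.
 */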
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) pci_channel_state_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* quiesce and block I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) ioat_shutdown(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (pci_enable_device_mem(pdev) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) "Failed to enable PCIe device after reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) result = PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pci_wake_from_d3(pdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static void ioat_pcie_error_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* initialize and bring everything back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ioat_resume(ioat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static const struct pci_error_handlers ioat_err_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .error_detected = ioat_pcie_error_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .slot_reset = ioat_pcie_error_slot_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .resume = ioat_pcie_error_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static struct pci_driver ioat_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) .id_table = ioat_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .probe = ioat_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .remove = ioat_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .shutdown = ioat_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .err_handler = &ioat_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
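/*
 * dma_device ->device_release callback: free the per-channel structures,
 * the completion pool and the ioatdma_device itself.
 */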
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static void release_ioatdma(struct dma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct ioatdma_device *d = to_ioatdma_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) for (i = 0; i < IOAT_MAX_CHANS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) kfree(d->idx[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) dma_pool_destroy(d->completion_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
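/* Allocate and minimally initialize the ioatdma_device for this PCI function. */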
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static struct ioatdma_device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) d->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) d->reg_base = iobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) d->dma_dev.device_release = release_ioatdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
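/*
 * PCI probe: map the MMIO BAR, configure a 64-bit DMA mask (falling back
 * to 32-bit), read the hardware version register and hand off to the
 * version 3 probe path; devices older than v3.0 are rejected.
 */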
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) void __iomem * const *iomap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct ioatdma_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) err = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) iomap = pcim_iomap_table(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (!iomap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) pci_set_drvdata(pdev, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) device->version = readb(device->reg_base + IOAT_VER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (device->version >= IOAT_VER_3_4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) ioat_dca_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (device->version >= IOAT_VER_3_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (is_skx_ioat(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) device->version = IOAT_VER_3_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) err = ioat3_dma_probe(device, ioat_dca_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (device->version >= IOAT_VER_3_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) pci_enable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return -ENODEV;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) pci_disable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
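/*
 * PCI remove: quiesce the channels, tear down the DCA provider if one was
 * registered, and unregister the dmaengine device.
 */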
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void ioat_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct ioatdma_device *device = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ioat_shutdown(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) dev_dbg(&pdev->dev, "Removing dma and dca services\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (device->dca) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) unregister_dca_provider(device->dca, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) free_dca_provider(device->dca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) device->dca = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) pci_disable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ioat_dma_remove(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
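/*
 * Module init: create the descriptor and SED kmem caches and register the
 * PCI driver; the caches are torn down again if registration fails.
 */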
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static int __init ioat_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) DRV_NAME, IOAT_DMA_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 0, SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!ioat_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (!ioat_sed_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) goto err_ioat_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) err = pci_register_driver(&ioat_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) goto err_ioat3_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) err_ioat3_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) kmem_cache_destroy(ioat_sed_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) err_ioat_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) kmem_cache_destroy(ioat_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) module_init(ioat_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static void __exit ioat_exit_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) pci_unregister_driver(&ioat_pci_driver);
kmem_cache_destroy(ioat_sed_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) kmem_cache_destroy(ioat_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) module_exit(ioat_exit_module);