/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __CARD_BASE_H__
#define __CARD_BASE_H__

/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Interfaces within the GenWQE module. Defines genwqe_dev and
 * ddcb_queue as well as ddcb_requ.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/stringify.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <linux/genwqe/genwqe_card.h>
#include "genwqe_driver.h"

#define GENWQE_MSI_IRQS			4  /* Just one supported, no MSI-X */

#define GENWQE_MAX_VFS			15 /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS		16 /* 1 PF and 15 VFs */
#define GENWQE_CARD_NO_MAX		(16 * GENWQE_MAX_FUNCS)

/* Compile parameters, some of them appear in debugfs for later adjustment */
#define GENWQE_DDCB_MAX			32 /* DDCBs on the work-queue */
#define GENWQE_POLLING_ENABLED		0  /* in case of irqs not working */
#define GENWQE_DDCB_SOFTWARE_TIMEOUT	10 /* timeout per DDCB in seconds */
#define GENWQE_KILL_TIMEOUT		8  /* time until process gets killed */
#define GENWQE_VF_JOBTIMEOUT_MSEC	250  /* 250 msec */
#define GENWQE_PF_JOBTIMEOUT_MSEC	8000 /* 8 sec should be ok */
#define GENWQE_HEALTH_CHECK_INTERVAL	4 /* <= 0: disabled */

/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];

/*
 * Config space for Genwqe5 A7:
 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
 */
#define PCI_DEVICE_GENWQE		0x044b /* Genwqe DeviceID */

#define PCI_SUBSYSTEM_ID_GENWQE5	0x035f /* Genwqe A5 Subsystem-ID */
#define PCI_SUBSYSTEM_ID_GENWQE5_NEW	0x044b /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5		0x1200 /* UNKNOWN */

#define PCI_SUBVENDOR_ID_IBM_SRIOV	0x0000
#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV	0x0000 /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5_SRIOV	0x1200 /* UNKNOWN */

#define GENWQE_SLU_ARCH_REQ		2 /* Required SLU architecture level */

/**
 * struct genwqe_reg - Genwqe data dump functionality
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};

/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS  = 8,
	GENWQE_DBG_DMA   = 9,
	GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
};

/* Software error injection to simulate card failures */
#define GENWQE_INJECT_HARDWARE_FAILURE	0x00000001 /* injects -1 reg reads */
#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
#define GENWQE_INJECT_GFIR_FATAL	0x00000004 /* GFIR = 0x0000ffff */
#define GENWQE_INJECT_GFIR_INFO		0x00000008 /* GFIR = 0xffff0000 */

/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective, the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as doing an unbind/bind operation
 * on the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If it detects a problem, the card's
 * device is shut down and restarted again, along with a reset of
 * the card and queue.
 *
 * All functions accessing the card device return either -EIO or -ENODEV
 * to indicate the malfunction to the user. The user has to close
 * the file descriptor and open a new one, once the card becomes
 * available again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal is
 * generated for the application, which has to provide a handler to
 * react on it. If the application does not close the open
 * file descriptor, a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * reference counters for the card's character devices getting out of
 * sync. The character device deallocation does not block, even if
 * there is still an open file descriptor pending. If this pending
 * descriptor is closed, the data structures used by the character
 * device are reinstantiated, which will lead to the reference counter
 * dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */


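/*
 * Illustrative sketch (assumption, not part of this driver): a
 * user-space process that wants the SIGIO notification described
 * above would typically enable FASYNC on its open descriptor:
 *
 *   signal(SIGIO, sigio_handler);
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * The handler then closes the stale descriptor and reopens the
 * device node once the card is available again.
 */
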
/**
 * enum dma_mapping_type - Mapping type definition
 *
 * To avoid memcpying data around, we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,		/* contiguous memory buffer */
	GENWQE_MAPPING_SGL_TEMP,	/* sglist dynamically used */
	GENWQE_MAPPING_SGL_PINNED,	/* sglist used with pinning */
};

/**
 * struct dma_mapping - Information about memory mappings done by the driver
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;			/* user-space vaddr/non-aligned */
	void *k_vaddr;			/* kernel-space vaddr/non-aligned */
	dma_addr_t dma_addr;		/* physical DMA address */

	struct page **page_list;	/* list of pages used by user buff */
	dma_addr_t *dma_list;		/* list of dma addresses per page */
	unsigned int nr_pages;		/* number of pages */
	unsigned int size;		/* size in bytes */

	struct list_head card_list;	/* list of usr_maps for card */
	struct list_head pin_list;	/* list of pinned memory for dev */
	int write;			/* writable map? useful in unmapping */
};

static inline void genwqe_mapping_init(struct dma_mapping *m,
				       enum dma_mapping_type type)
{
	memset(m, 0, sizeof(*m));
	m->type = type;
	m->write = 1; /* Assume the maps we create are R/W */
}
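
/*
 * Illustrative sketch (assumption, not taken verbatim from the
 * driver): a raw user buffer is typically pinned and DMA-mapped
 * with the helpers declared further below:
 *
 *   struct dma_mapping m;
 *
 *   genwqe_mapping_init(&m, GENWQE_MAPPING_RAW);
 *   rc = genwqe_user_vmap(cd, &m, uaddr, size);
 *   if (rc == 0) {
 *           ... use m.dma_addr / m.dma_list in a DDCB ...
 *           genwqe_user_vunmap(cd, &m);
 *   }
 */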

/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max:        Number of DDCBs on the queue
 * @ddcb_next:       Next free DDCB
 * @ddcb_act:        Next DDCB supposed to finish
 * @ddcb_seq:        Sequence number of last DDCB
 * @ddcbs_in_flight: Currently enqueued DDCBs
 * @ddcbs_completed: Number of already completed DDCBs
 * @return_on_busy:  Number of -EBUSY returns on full queue
 * @wait_on_busy:    Number of waits on full queue
 * @ddcb_daddr:      DMA address of first DDCB in the queue
 * @ddcb_vaddr:      Kernel virtual address of first DDCB in the queue
 * @ddcb_req:        Associated requests (one per DDCB)
 * @ddcb_waitqs:     Associated wait queues (one per DDCB)
 * @ddcb_lock:       Lock to protect queuing operations
 * @busy_waitq:      Wait on next DDCB finishing
 */
struct ddcb_queue {
	int ddcb_max;			/* amount of DDCBs */
	int ddcb_next;			/* next available DDCB num */
	int ddcb_act;			/* DDCB to be processed */
	u16 ddcb_seq;			/* slc seq num */
	unsigned int ddcbs_in_flight;	/* number of ddcbs in processing */
	unsigned int ddcbs_completed;
	unsigned int ddcbs_max_in_flight;
	unsigned int return_on_busy;	/* how many times -EBUSY? */
	unsigned int wait_on_busy;

	dma_addr_t ddcb_daddr;		/* DMA address */
	struct ddcb *ddcb_vaddr;	/* kernel virtual addr for DDCBs */
	struct ddcb_requ **ddcb_req;	/* ddcb processing parameter */
	wait_queue_head_t *ddcb_waitqs;	/* waitqueue per ddcb */

	spinlock_t ddcb_lock;		/* exclusive access to queue */
	wait_queue_head_t busy_waitq;	/* wait for ddcb processing */

	/* registers of the respective queue to be used */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};

/*
 * GFIR, SLU_UNITCFG, APP_UNITCFG
 * 8 units with FIR/FEC + 64 secondary FIRs/FECs each.
 */
#define GENWQE_FFDC_REGS	(3 + (8 * (2 + 2 * 64)))

struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};

/**
 * struct genwqe_dev - GenWQE device information
 * @card_state:    Card operation state, see above
 * @ffdc:          First Failure Data Capture buffers for each unit
 * @card_thread:   Working thread to operate the DDCB queue
 * @queue_waitq:   Wait queue used in card_thread
 * @queue:         DDCB queue
 * @health_thread: Card monitoring thread (only for PFs)
 * @health_waitq:  Wait queue used in health_thread
 * @pci_dev:       Associated PCI device (function)
 * @mmio:          Base address of 64-bit register space
 * @mmio_len:      Length of register area
 * @file_lock:     Lock to protect access to file_list
 * @file_list:     List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;

	int card_idx;			/* card index 0..CARD_NO_MAX-1 */
	u64 flags;			/* general flags */

	/* FFDC data gathering */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB workqueue */
	struct task_struct *card_thread;
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;	/* genwqe DDCB queue */
	unsigned int irqs_processed;

	/* Card health checking thread */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	int use_platform_recovery;	/* use platform recovery mechanisms */

	/* char device */
	dev_t devnum_genwqe;		/* major/minor num card */
	struct class *class_genwqe;	/* reference to class object */
	struct device *dev;		/* for device creation */
	struct cdev cdev_genwqe;	/* char device for card */

	struct dentry *debugfs_root;	/* debugfs card root directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */

	/* pci resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* BAR-0 MMIO start */
	unsigned long mmio_len;
	int num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* access to all regs possible */

	/* config regs which we need often */
	u64 slu_unitcfg;
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;
	u64 last_gfir;
	char app_name[5];

	spinlock_t file_lock;		/* lock for open files */
	struct list_head file_list;	/* list of open files */

	/* debugfs parameters */
	int ddcb_software_timeout;	/* wait until DDCB times out */
	int skip_recovery;		/* circumvention if recovery fails */
	int kill_timeout;		/* wait after sending SIGKILL */
};

/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW      = 0,
	GENWQE_REQU_ENQUEUED = 1,
	GENWQE_REQU_TAPPED   = 2,
	GENWQE_REQU_FINISHED = 3,
	GENWQE_REQU_STATE_MAX,
};

/**
 * struct genwqe_sgl - Scatter gather list describing user-space memory
 * @sgl:            scatter gather list, needs to be 128 byte aligned
 * @sgl_dma_addr:   DMA address of sgl
 * @sgl_size:       size of area used for sgl
 * @user_addr:      user-space address of memory area
 * @user_size:      size of user-space memory area
 * @fpage:          buffer for a partial first page, if needed
 * @fpage_dma_addr: DMA address of the partial first page
 * @lpage:          buffer for a partial last page, if needed
 * @lpage_dma_addr: DMA address of the partial last page
 * @write:          should we write it back to user-space?
 */
struct genwqe_sgl {
	dma_addr_t sgl_dma_addr;
	struct sg_entry *sgl;
	size_t sgl_size;		/* size of sgl */

	void __user *user_addr;		/* user-space base-address */
	size_t user_size;		/* size of memory area */

	int write;

	unsigned long nr_pages;
	unsigned long fpage_offs;
	size_t fpage_size;
	size_t lpage_size;

	void *fpage;
	dma_addr_t fpage_dma_addr;

	void *lpage;
	dma_addr_t lpage_dma_addr;
};

int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write);

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list);

int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
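
/*
 * Illustrative sketch (assumption): typical call sequence for the
 * three functions above when mapping a user buffer into a
 * hardware-readable sgl; the dma_list would come from a pinned
 * struct dma_mapping:
 *
 *   struct genwqe_sgl sgl;
 *
 *   rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size, write);
 *   if (rc == 0) {
 *           rc = genwqe_setup_sgl(cd, &sgl, m->dma_list);
 *           ... pass sgl.sgl_dma_addr to the card ...
 *           genwqe_free_sync_sgl(cd, &sgl);
 *   }
 */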

/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd: User space representation of the DDCB execution request
 */
struct ddcb_requ {
	/* kernel specific content */
	enum genwqe_requ_state req_state;	/* request status */
	int num;				/* ddcb_no for this request */
	struct ddcb_queue *queue;		/* associated queue */

	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct genwqe_sgl sgls[DDCB_FIXUPS];

	/* kernel/user shared content */
	struct genwqe_ddcb_cmd cmd;		/* user-space DDCB request */
	struct genwqe_debug_data debug_data;
};

/**
 * struct genwqe_file - Information for open GenWQE devices
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue;
	struct pid *opener;
	struct list_head list;		/* entry in list of open files */

	spinlock_t map_lock;		/* lock for dma_mappings */
	struct list_head map_list;	/* list of dma_mappings */

	spinlock_t pin_lock;		/* lock for pinned memory */
	struct list_head pin_list;	/* list of pinned memory */
};

int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
int genwqe_finish_queue(struct genwqe_dev *cd);
int genwqe_release_service_layer(struct genwqe_dev *cd);

/**
 * genwqe_get_slu_id() - Read Service Layer Unit Id
 * Return: 0x00: Development code
 *         0x01: SLC1 (old)
 *         0x02: SLC2 (sept2012)
 *         0x03: SLC2 (feb2013, generic driver)
 */
static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
{
	return (int)((cd->slu_unitcfg >> 32) & 0xff);
}
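
/*
 * Illustrative sketch (assumption): the returned id can be matched
 * against the values documented above, e.g.:
 *
 *   if (genwqe_get_slu_id(cd) <= 0x01)
 *           dev_info(&cd->pci_dev->dev, "old SLC detected\n");
 */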

int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);

u8 genwqe_card_type(struct genwqe_dev *cd);
int genwqe_card_reset(struct genwqe_dev *cd);
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);

int genwqe_device_create(struct genwqe_dev *cd);
int genwqe_device_remove(struct genwqe_dev *cd);

/* debugfs */
void genwqe_init_debugfs(struct genwqe_dev *cd);
void genqwe_exit_debugfs(struct genwqe_dev *cd);

int genwqe_read_softreset(struct genwqe_dev *cd);

/* Hardware Circumventions */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
int genwqe_flash_readback_fails(struct genwqe_dev *cd);

/**
 * genwqe_write_vreg() - Write register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @val:  value to write
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);

/**
 * genwqe_read_vreg() - Read register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 *
 * Return: content of the register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
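
/*
 * Illustrative sketch (assumption): mirroring a register value from
 * the PF window (func 0) into the window of VF0 (func 1), using the
 * func encoding documented above:
 *
 *   u64 val = genwqe_read_vreg(cd, reg, 0);
 *
 *   genwqe_write_vreg(cd, reg, val, 1);
 */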

/* FFDC Buffer Management */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
			  struct genwqe_reg *regs, unsigned int max_regs);
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all);
int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
			 struct genwqe_reg *regs, unsigned int max_regs);

int genwqe_init_debug_data(struct genwqe_dev *cd,
			   struct genwqe_debug_data *d);

void genwqe_init_crc32(void);
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);

/* Memory allocation/deallocation; dma address handling */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
		     void *uaddr, unsigned long size);

int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);

static inline bool dma_mapping_used(struct dma_mapping *m)
{
	if (!m)
		return false;
	return m->size != 0;
}
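
/*
 * Illustrative sketch (assumption): cleanup paths can use the helper
 * above to release only those mappings which are still active:
 *
 *   if (dma_mapping_used(m))
 *           genwqe_user_vunmap(cd, m);
 */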

/**
 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
 *
 * This function will do the address translation changes to the DDCBs
 * according to the definitions required by the ATS field. It looks up
 * the memory allocation buffer or does vmap/vunmap for the respective
 * user-space buffers, including page pinning and scatter gather list
 * buildup and teardown.
 */
int __genwqe_execute_ddcb(struct genwqe_dev *cd,
			  struct genwqe_ddcb_cmd *cmd, unsigned int f_flags);
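
/*
 * Illustrative sketch (assumption): callers fill in a struct
 * genwqe_ddcb_cmd as defined in the UAPI header genwqe_card.h and
 * pass it along with the file flags of the requesting descriptor:
 *
 *   struct genwqe_ddcb_cmd cmd;
 *
 *   memset(&cmd, 0, sizeof(cmd));
 *   ... set up ATS field and job data per genwqe_card.h ...
 *   rc = __genwqe_execute_ddcb(cd, &cmd, filp->f_flags);
 */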

/**
 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
 *
 * This version will not do address translation or any modification of
 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
 * entirely prepared by the driver itself. That means the appropriate
 * DMA addresses are already in the DDCB and do not need any
 * modification.
 */
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
			      struct genwqe_ddcb_cmd *cmd,
			      unsigned int f_flags);
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd,
			  struct ddcb_requ *req,
			  unsigned int f_flags);

int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
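
/*
 * Illustrative sketch (assumption): the execute variants above are
 * roughly a composition of the three primitives declared above. On
 * timeout or signal delivery the DDCB must be purged again:
 *
 *   rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
 *   if (rc == 0) {
 *           rc = __genwqe_wait_ddcb(cd, req);
 *           if (rc < 0)
 *                   __genwqe_purge_ddcb(cd, req);
 *   }
 */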

/* register access */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle);
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
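
/*
 * Illustrative sketch (assumption): allocation and release always
 * come in pairs; the returned dma_handle is the address the card
 * gets to see, the returned pointer is for kernel-side access:
 *
 *   dma_addr_t daddr;
 *   void *vaddr = __genwqe_alloc_consistent(cd, size, &daddr);
 *
 *   if (vaddr) {
 *           ... use vaddr for CPU access, daddr for the card ...
 *           __genwqe_free_consistent(cd, size, vaddr, daddr);
 *   }
 */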

/* Base clock frequency in MHz */
int genwqe_base_clock_frequency(struct genwqe_dev *cd);

/* Before FFDC is captured the traps should be stopped. */
void genwqe_stop_traps(struct genwqe_dev *cd);
void genwqe_start_traps(struct genwqe_dev *cd);

/* Hardware circumvention */
bool genwqe_need_err_masking(struct genwqe_dev *cd);

/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On systems with no SRIOV support _and_ on virtualized systems we get:
 *   is_physfn = 0 is_virtfn = 0
 *
 * Other vendors have individual PCI device ids to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one PCI device id for both, VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access, e.g. currently on PowerPC:
 *
 *   if (pci_dev->is_virtfn)
 *           cd->is_privileged = 0;
 *   else
 *           cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *                                != IO_ILLEGAL_VALUE);
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}

#endif	/* __CARD_BASE_H__ */