/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

struct idxd_device;
struct idxd_wq;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_MAX
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

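/*
 * Per-interrupt-vector context. Submitted descriptors are queued lock-free
 * on pending_llist; the threaded handler completes them from there and
 * moves any not-yet-finished ones onto work_list for later passes.
 */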
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;
	struct list_head work_list;
};

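/*
 * Software state for a device group: the engines and WQs bound to it,
 * the bandwidth-token accounting (use_token_limit, tokens_allowed,
 * tokens_reserved) and the traffic-class assignments (tc_a, tc_b) that
 * get programmed into the GRPCFG registers.
 */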
struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

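/*
 * Char device backing a user-type WQ (IDXD_WQT_USER): userspace opens it
 * to mmap the WQ portal and submit descriptors directly.
 */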
struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct device dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE			1024
#define WQ_TYPE_SIZE			10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

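/*
 * Per-WQ state. dportal is the mapped submission portal. hw_descs, compls
 * and descs form a ring of num_descs preallocated descriptors; free slots
 * are handed out through the sbitmap_queue sbq.
 */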
struct idxd_wq {
	void __iomem *dportal;
	struct device conf_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	struct dsa_completion_record *compls;
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers: device capability state read at probe and cached in software */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

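/*
 * Per-device driver state, one instance per enumerated PCI function. The
 * groups/wqs/engines arrays and the max_* limits reflect what the
 * capability registers reported at probe time.
 */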
struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/*
 * IDXD software descriptor: wraps a DSA hardware descriptor (hw/desc_dma)
 * and its completion record (completion/compl_dma). llnode and list link
 * the descriptor onto the idxd_irq_entry handoff lists, and txd ties it
 * into the dmaengine completion path.
 */
struct idxd_desc {
	struct dsa_hw_desc *hw;
	dma_addr_t desc_dma;
	struct dsa_completion_record *completion;
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

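/* map a sysfs conf_dev embedded in idxd_device/idxd_wq back to its container */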
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;

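/*
 * A dedicated WQ is owned by a single client and submitted to with
 * MOVDIR64B; a shared WQ accepts ENQCMD/ENQCMDS from multiple clients,
 * which depends on PASID support.
 */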
static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

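/*
 * Per the DSA spec, each WQ owns four consecutive 4KB portal pages in the
 * device portal BAR (hence the wq_id * 4 below); prot then selects the
 * unlimited (page 0) or limited (page 1) submission portal within them.
 */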
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
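
/*
 * Usage sketch, assuming IDXD_WQ_BAR and IDXD_PORTAL_SIZE from registers.h
 * name the portal BAR and the per-WQ portal span:
 *
 *	resource_size_t start = pci_resource_start(idxd->pdev, IDXD_WQ_BAR);
 *
 *	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
 *	wq->dportal = devm_ioremap(&idxd->pdev->dev, start, IDXD_PORTAL_SIZE);
 */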

static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

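/*
 * Plain, unlocked counters: callers are expected to serialize with
 * wq->wq_lock, e.g.:
 *
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_get(wq);
 *	mutex_unlock(&wq->wq_lock);
 */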
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
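
/*
 * Typical submission flow in the dmaengine glue (sketch):
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return NULL;
 *	... fill in desc->hw ...
 *	if (idxd_submit_desc(wq, desc) < 0)
 *		idxd_free_desc(wq, desc);
 */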

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif /* _IDXD_H_ */