/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1			4096
#define QM_QNUM_V2			1024
#define QM_MAX_VFS_NUM_V2		63

/* qm user domain */
#define QM_ARUSER_M_CFG_1		0x100088
#define AXUSER_SNOOP_ENABLE		BIT(30)
#define AXUSER_CMD_TYPE			GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL		1
#define AXUSER_NS			BIT(6)
#define AXUSER_NO			BIT(5)
#define AXUSER_FP			BIT(4)
#define AXUSER_SSV			BIT(0)
#define AXUSER_BASE			(AXUSER_SNOOP_ENABLE |		\
					FIELD_PREP(AXUSER_CMD_TYPE,	\
					AXUSER_CMD_SMMU_NORMAL) |	\
					AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE		0x100090
#define ARUSER_M_CFG_ENABLE		0xfffffffe
#define QM_AWUSER_M_CFG_1		0x100098
#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
#define AWUSER_M_CFG_ENABLE		0xfffffffe
#define QM_WUSER_M_CFG_ENABLE		0x1000a8
#define WUSER_M_CFG_ENABLE		0xffffffff

/* qm cache */
#define QM_CACHE_CTL			0x100050
#define SQC_CACHE_ENABLE		BIT(0)
#define CQC_CACHE_ENABLE		BIT(1)
#define SQC_CACHE_WB_ENABLE		BIT(4)
#define SQC_CACHE_WB_THRD		GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE		BIT(11)
#define CQC_CACHE_WB_THRD		GENMASK(17, 12)
#define QM_AXI_M_CFG			0x1000ac
#define AXI_M_CFG			0xffff
#define QM_AXI_M_CFG_ENABLE		0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE		0xffffffff
#define QM_PEH_AXUSER_CFG		0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG			0x401001
#define PEH_AXUSER_CFG_ENABLE		0xffffffff
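
/*
 * The user-domain, cache and AXI registers above are normally programmed once
 * by the engine driver while bringing up the PF.  The sketch below is only
 * illustrative (an assumption about a typical caller, loosely modelled on the
 * existing engine drivers); the write-back thresholds and which PEH bits a
 * given device needs are engine specific:
 *
 *	static void example_set_user_domain_and_cache(struct hisi_qm *qm)
 *	{
 *		writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
 *		writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
 *		writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
 *		writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
 *		writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
 *
 *		writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
 *		writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
 *		writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
 *		writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
 *
 *		writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE |
 *		       SQC_CACHE_WB_ENABLE | CQC_CACHE_WB_ENABLE |
 *		       FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
 *		       FIELD_PREP(CQC_CACHE_WB_THRD, 1),
 *		       qm->io_base + QM_CACHE_CTL);
 *	}
 */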

#define QM_DFX_MB_CNT_VF		0x104010
#define QM_DFX_DB_CNT_VF		0x104020
#define QM_DFX_SQE_CNT_VF_SQN		0x104030
#define QM_DFX_CQE_CNT_VF_CQN		0x104040
#define QM_DFX_QN_SHIFT			16
#define CURRENT_FUN_MASK		GENMASK(5, 0)
#define CURRENT_Q_MASK			GENMASK(31, 16)

#define QM_AXI_RRESP			BIT(0)
#define QM_AXI_BRESP			BIT(1)
#define QM_ECC_MBIT			BIT(2)
#define QM_ECC_1BIT			BIT(3)
#define QM_ACC_GET_TASK_TIMEOUT		BIT(4)
#define QM_ACC_DO_TASK_TIMEOUT		BIT(5)
#define QM_ACC_WB_NOT_READY_TIMEOUT	BIT(6)
#define QM_SQ_CQ_VF_INVALID		BIT(7)
#define QM_CQ_VF_INVALID		BIT(8)
#define QM_SQ_VF_INVALID		BIT(9)
#define QM_DB_TIMEOUT			BIT(10)
#define QM_OF_FIFO_OF			BIT(11)
#define QM_DB_RANDOM_INVALID		BIT(12)

#define QM_BASE_NFE	(QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
			 QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
			 QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
#define QM_BASE_CE			QM_ECC_1BIT

#define QM_Q_DEPTH			1024
#define QM_MIN_QNUM			2
#define HISI_ACC_SGL_SGE_NR_MAX		255

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR		1

enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_FLR,
};

enum qm_state {
	QM_INIT = 0,
	QM_START,
	QM_CLOSE,
	QM_STOP,
};

enum qp_state {
	QP_INIT = 1,
	QP_START,
	QP_STOP,
	QP_CLOSE,
};

enum qm_hw_ver {
	QM_HW_UNKNOWN = -1,
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};

enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

enum qm_debug_file {
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,
};

struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
};

struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};

struct hisi_qm;

struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	u32 ecc_2bits_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
	struct hisi_qm_err_info err_info;
};
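
/*
 * An engine driver hooks its error handling into the QM core through
 * hisi_qm_err_ini, and reports which bits it treats as correctable (ce),
 * non-fatal (nfe) and fatal (fe) via err_info.  A hedged, hypothetical
 * instance is shown below; all example_* callbacks are placeholders, and the
 * extra NFE bit, msi_wr_port value and ACPI reset method name vary per engine:
 *
 *	static const struct hisi_qm_err_ini example_err_ini = {
 *		.hw_init		 = example_hw_init,
 *		.hw_err_enable		 = example_hw_error_enable,
 *		.hw_err_disable		 = example_hw_error_disable,
 *		.get_dev_hw_err_status	 = example_get_hw_err_status,
 *		.clear_dev_hw_err_status = example_clear_hw_err_status,
 *		.log_dev_hw_err		 = example_log_hw_error,
 *		.err_info		 = {
 *			.ce		= QM_BASE_CE,
 *			.nfe		= QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
 *			.fe		= 0,
 *			.msi_wr_port	= BIT(0),
 *			.acpi_rst	= "ERST",
 *		},
 *	};
 */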

struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(void);
	void (*unregister_from_crypto)(void);
};

struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 vfs_num;
	struct list_head list;
	struct hisi_qm_list *qm_list;

	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;

	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_status err_status;
	unsigned long reset_flag;

	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;

	struct mutex mailbox_lock;

	const struct hisi_qm_hw_ops *ops;

	struct qm_debug debug;

	u32 error_mask;

	struct workqueue_struct *wq;
	struct work_struct work;
	struct work_struct rst_work;

	const char *algs;
	bool use_sva;
	bool is_frozen;
	resource_size_t phys_base;
	resource_size_t phys_size;
	struct uacce_device *uacce;
};

struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct hisi_qp {
	u32 qp_id;
	u8 alg_type;
	u8 req_type;

	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;

	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void *qp_ctx;
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);

	struct hisi_qm *qm;
	bool is_resetting;
	u16 pasid;
	struct uacce_queue *uacce_q;
};

static inline int q_num_set(const char *val, const struct kernel_param *kp,
			    unsigned int device)
{
	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
					      device, NULL);
	u32 n, q_num;
	int ret;

	if (!val)
		return -EINVAL;

	if (!pdev) {
		q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
		pr_info("No device found currently, suppose queue number is %d\n",
			q_num);
	} else {
		if (pdev->revision == QM_HW_V1)
			q_num = QM_QNUM_V1;
		else
			q_num = QM_QNUM_V2;
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || n < QM_MIN_QNUM || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}
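
/*
 * q_num_set() is intended to back a module parameter's .set hook; each engine
 * driver wraps it with its own PCI device ID.  A minimal sketch, assuming a
 * hypothetical PCI_DEVICE_ID_EXAMPLE_ACC, parameter name and default:
 *
 *	static int example_q_num_set(const char *val, const struct kernel_param *kp)
 *	{
 *		return q_num_set(val, kp, PCI_DEVICE_ID_EXAMPLE_ACC);
 *	}
 *
 *	static const struct kernel_param_ops example_q_num_ops = {
 *		.set = example_q_num_set,
 *		.get = param_get_int,
 *	};
 *
 *	static u32 example_pf_q_num = 256;
 *	module_param_cb(pf_q_num, &example_q_num_ops, &example_pf_q_num, 0444);
 */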

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret < 0)
		return ret;

	if (n > QM_MAX_VFS_NUM_V2)
		return -EINVAL;

	return param_set_int(val, kp);
}
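
/*
 * vfs_num_set() takes no extra argument, so it can be used directly as a
 * kernel_param_ops .set hook.  Illustrative only; the parameter below is a
 * placeholder, not something defined by this header:
 *
 *	static const struct kernel_param_ops vfs_num_ops = {
 *		.set = vfs_num_set,
 *		.get = param_get_int,
 *	};
 *
 *	static u32 vfs_num;
 *	module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
 */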

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
	INIT_LIST_HEAD(&qm_list->list);
	mutex_init(&qm_list->lock);
}
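
/*
 * Each engine driver keeps one hisi_qm_list shared by all of its devices.  It
 * is initialised once at module load and later handed to hisi_qm_alg_register()
 * and hisi_qm_alloc_qps_node().  A hedged sketch with hypothetical names:
 *
 *	static struct hisi_qm_list example_devices = {
 *		.register_to_crypto	= example_register_to_crypto,
 *		.unregister_from_crypto	= example_unregister_from_crypto,
 *	};
 *
 *	static int __init example_mod_init(void)
 *	{
 *		hisi_qm_init_list(&example_devices);
 *		return pci_register_driver(&example_pci_driver);
 *	}
 */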

int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);
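
/*
 * A typical PF probe path, as seen from an engine driver, looks roughly like
 * the sketch below.  This is a hedged outline only: error unwinding is
 * compressed, and all example_* names, the SQE size and the device name are
 * assumptions rather than part of this API:
 *
 *	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct hisi_qm *qm;
 *		int ret;
 *
 *		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
 *		if (!qm)
 *			return -ENOMEM;
 *
 *		qm->pdev = pdev;
 *		qm->ver = pdev->revision;
 *		qm->fun_type = QM_HW_PF;
 *		qm->sqe_size = 128;
 *		qm->dev_name = "example_acc";
 *		qm->qm_list = &example_devices;
 *
 *		ret = hisi_qm_init(qm);
 *		if (ret)
 *			return ret;
 *
 *		hisi_qm_dev_err_init(qm);
 *
 *		ret = hisi_qm_start(qm);
 *		if (ret)
 *			goto err_uninit;
 *
 *		ret = hisi_qm_alg_register(qm, &example_devices);
 *		if (ret)
 *			goto err_stop;
 *
 *		return 0;
 *
 *	err_stop:
 *		hisi_qm_stop(qm, QM_NORMAL);
 *	err_uninit:
 *		hisi_qm_dev_err_uninit(qm);
 *		hisi_qm_uninit(qm);
 *		return ret;
 *	}
 */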

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
	struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
	u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
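
/*
 * On the data path, queue allocation and the hardware-SGL pool are typically
 * used together: pick queue pairs close to the caller's NUMA node once, create
 * an SGL pool sized for the queue depth, then map and unmap one scatterlist
 * per request.  A hedged sketch; dev, req_sgl, req_id and example_devices are
 * hypothetical:
 *
 *	struct hisi_acc_sgl_pool *pool;
 *	struct hisi_acc_hw_sgl *hw_sgl;
 *	struct hisi_qp *qps[1];
 *	dma_addr_t hw_sgl_dma;
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&example_devices, 1, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *
 *	pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, HISI_ACC_SGL_SGE_NR_MAX);
 *	if (IS_ERR(pool)) {
 *		hisi_qm_free_qps(qps, 1);
 *		return PTR_ERR(pool);
 *	}
 *
 *	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, req_sgl, pool, req_id,
 *					       &hw_sgl_dma);
 *	if (!IS_ERR(hw_sgl)) {
 *		(place hw_sgl_dma in the SQE, call hisi_qp_send(), wait for the
 *		 completion callback, then:)
 *		hisi_acc_sg_buf_unmap(dev, req_sgl, hw_sgl);
 *	}
 *
 *	hisi_acc_free_sgl_pool(dev, pool);
 *	hisi_qm_free_qps(qps, 1);
 */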
#endif