/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_H
#define _SIW_H

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include <rdma/siw-abi.h>
#include "iwarp.h"

#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDORT_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iWARP limitation; could be relaxed */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD

/* Minimum number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE
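
/*
 * Illustrative sketch (not lifted from the TX path, details may
 * differ) of how a transmit context could apply this threshold,
 * using the siw_iwarp_tx fields defined further below:
 *
 *	if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH &&
 *	    !(tx_flags(wqe) & SIW_WQE_INLINE))
 *		c_tx->use_sendpage = 1;
 */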

/* Maximum number of frames which can be sent in one SQ processing */
#define SQ_USER_MAXBURST 100

/* Maximum number of consecutive IRQ elements which get served
 * if SQ has pending work. Prevents starving local SQ processing
 * by serving peer Read Requests.
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
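
/*
 * A hedged sketch (an assumption, not the in-tree code) of how the
 * SQ processor could enforce this cap, using qp->irq_burst and the
 * sq_get_next() helper defined at the end of this header:
 *
 *	if (sq_get_next(qp) &&
 *	    ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
 *		qp->irq_burst = 0;
 *		... serve one SQ element before resuming the IRQ ...
 *	}
 */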

struct siw_dev_cap {
	int max_qp;
	int max_qp_wr;
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_mw;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
};

struct siw_pd {
	struct ib_pd base_pd;
};

struct siw_device {
	struct ib_device base_dev;
	struct net_device *netdev;
	struct siw_dev_cap attrs;

	u32 vendor_part_id;
	int numa_node;

	/* physical port state (only one port per device) */
	enum ib_port_state state;

	spinlock_t lock;

	struct xarray qp_xa;
	struct xarray mem_xa;

	struct list_head cep_list;
	struct list_head qp_list;

	/* active objects statistics to enforce limits */
	atomic_t num_qp;
	atomic_t num_cq;
	atomic_t num_pd;
	atomic_t num_mr;
	atomic_t num_srq;
	atomic_t num_ctx;

	struct work_struct netdev_down;
};

struct siw_ucontext {
	struct ib_ucontext base_ucontext;
	struct siw_device *sdev;
};

/*
 * The RDMA core does not define LOCAL_READ access, which is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK					\
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |	\
	 IB_ACCESS_REMOTE_READ)

/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */

struct siw_page_chunk {
	struct page **plist;
};

struct siw_umem {
	struct siw_page_chunk *page_chunk;
	int num_pages;
	bool writable;
	u64 fp_addr; /* First page base address */
	struct mm_struct *owning_mm;
};

struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};

struct siw_pbl {
	unsigned int num_buf;
	unsigned int max_buf;
	struct siw_pble pbe[];
};
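
/*
 * A minimal sketch of resolving an address from a PBL, assuming the
 * entries are ordered by pbl_off (the in-tree helper for this lives
 * in siw_mem.c; this is not its exact code):
 *
 *	static dma_addr_t pbl_lookup(struct siw_pbl *pbl, u64 off)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < pbl->num_buf; i++) {
 *			struct siw_pble *pble = &pbl->pbe[i];
 *
 *			if (off >= pble->pbl_off &&
 *			    off < pble->pbl_off + pble->size)
 *				return pble->addr + (off - pble->pbl_off);
 *		}
 *		return 0;
 *	}
 */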

/*
 * Generic memory representation for registered siw memory.
 * Memory lookup is always via the upper 24 bits of the STag
 * (the STag index).
 */
struct siw_mem {
	struct siw_device *sdev;
	struct kref ref;
	u64 va; /* VA of memory */
	u64 len; /* length of the memory buffer in bytes */
	u32 stag; /* iWARP memory access steering tag */
	u8 stag_valid; /* VALID or INVALID */
	u8 is_pbl; /* PBL or user space mem */
	u8 is_mw; /* Memory Region or Memory Window */
	enum ib_access_flags perms; /* local/remote READ & WRITE */
	union {
		struct siw_umem *umem;
		struct siw_pbl *pbl;
		void *mem_obj;
	};
	struct ib_pd *pd;
};
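
/*
 * Sketch of the STag-index lookup described above, mirroring the RCU
 * pattern of siw_qp_id2obj() further below (the in-tree counterpart
 * lives in siw_mem.c; details may differ):
 *
 *	mem = xa_load(&sdev->mem_xa, stag >> 8);
 *	if (mem && !kref_get_unless_zero(&mem->ref))
 *		mem = NULL;
 */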

struct siw_mr {
	struct ib_mr base_mr;
	struct siw_mem *mem;
	struct rcu_head rcu;
};

/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_STAG_INVALID,
	E_BASE_BOUNDS,
	E_ACCESS_PERM,
	E_PD_MISMATCH
};

enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};

/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of the application's SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per-SGE resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};

struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put;
	u32 cq_get;
	u32 num_cqe;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};
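
/*
 * Illustrative consumer-side sketch of the cq_get/num_cqe ring
 * indexing (siw_reap_cqe(), declared at the end of this header,
 * implements the real thing including locking, omitted here):
 *
 *	struct siw_cqe *cqe = &cq->queue[cq->cq_get % cq->num_cqe];
 *
 *	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
 *		... copy CQE content to the ib_wc ...
 *		WRITE_ONCE(cqe->flags, 0);
 *		cq->cq_get++;
 *	}
 */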

enum siw_qp_state {
	SIW_QP_STATE_IDLE,
	SIW_QP_STATE_RTR,
	SIW_QP_STATE_RTS,
	SIW_QP_STATE_CLOSING,
	SIW_QP_STATE_TERMINATE,
	SIW_QP_STATE_ERROR,
	SIW_QP_STATE_COUNT
};

enum siw_qp_flags {
	SIW_RDMA_BIND_ENABLED = (1 << 0),
	SIW_RDMA_WRITE_ENABLED = (1 << 1),
	SIW_RDMA_READ_ENABLED = (1 << 2),
	SIW_SIGNAL_ALL_WR = (1 << 3),
	SIW_MPA_CRC = (1 << 4),
	SIW_QP_IN_DESTROY = (1 << 5)
};

enum siw_qp_attr_mask {
	SIW_QP_ATTR_STATE = (1 << 0),
	SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
	SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
	SIW_QP_ATTR_ORD = (1 << 3),
	SIW_QP_ATTR_IRD = (1 << 4),
	SIW_QP_ATTR_SQ_SIZE = (1 << 5),
	SIW_QP_ATTR_RQ_SIZE = (1 << 6),
	SIW_QP_ATTR_MPA = (1 << 7)
};

struct siw_srq {
	struct ib_srq base_srq;
	spinlock_t lock;
	u32 max_sge;
	u32 limit; /* low watermark for async event */
	struct siw_rqe *recvq;
	u32 rq_put;
	u32 rq_get;
	u32 num_rqe; /* max # of wqe's allowed */
	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
	bool armed:1; /* inform user if limit hit */
	bool is_kernel_res:1; /* true if kernel client */
};
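
/*
 * Sketch of the armed/limit interplay on the RQE consumer side,
 * assuming a hypothetical srq_fill_level() helper (not part of this
 * header); the in-tree check sits in the RQE consumption path and
 * raises IB_EVENT_SRQ_LIMIT_REACHED:
 *
 *	if (srq->armed && srq_fill_level(srq) < srq->limit) {
 *		srq->armed = false;
 *		... post IB_EVENT_SRQ_LIMIT_REACHED to the client ...
 *	}
 */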

struct siw_qp_attrs {
	enum siw_qp_state state;
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 sq_max_sges;
	u32 rq_max_sges;
	enum siw_qp_flags flags;

	struct socket *sk;
};

enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};

enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};

struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	union iwarp_hdr hdr;
	struct mpa_trailer trailer;

	enum siw_rx_state state;

	/*
	 * For each FPDU, the main RX loop runs through 3 stages:
	 * receiving protocol headers, placing DDP payload and receiving
	 * trailer information (CRC + possibly padding).
	 * The next two variables keep state on the receive status of the
	 * current FPDU part (hdr, data, trailer).
	 */
	int fpdu_part_rcvd; /* bytes in pkt part copied */
	int fpdu_part_rem; /* bytes in pkt part not seen */

	/*
	 * Next expected DDP MSN for each QN +
	 * expected steering tag +
	 * expected DDP tagged offset (all in host byte order)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 ddp_stag;
	u64 ddp_to;
	u32 inval_stag; /* STag to be invalidated */

	struct shash_desc *mpa_crc_hd;
	u8 rx_suspend : 1;
	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};
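
/*
 * A minimal sketch of how fpdu_part_rem/fpdu_part_rcvd and the skb
 * bookkeeping advance while copying from the current skb ('dest' is
 * an assumed destination buffer, error handling omitted):
 *
 *	int rv = min(srx->skb_new, srx->fpdu_part_rem);
 *
 *	skb_copy_bits(srx->skb, srx->skb_offset, dest, rv);
 *	srx->skb_offset += rv;
 *	srx->skb_new -= rv;
 *	srx->skb_copied += rv;
 *	srx->fpdu_part_rcvd += rv;
 *	srx->fpdu_part_rem -= rv;
 */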

struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid according to wqe->wr_status.
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};

/*
 * Shorthands for short packets w/o payload,
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};

struct siw_write_pkt {
	struct iwarp_rdma_write write;
	__be32 crc;
};

struct siw_rreq_pkt {
	struct iwarp_rdma_rreq rreq;
	__be32 crc;
};

struct siw_rresp_pkt {
	struct iwarp_rdma_rresp rresp;
	__be32 crc;
};

struct siw_iwarp_tx {
	union {
		union iwarp_hdr hdr;

		/* Generic part of FPDU header */
		struct iwarp_ctrl ctrl;
		struct iwarp_ctrl_untagged c_untagged;
		struct iwarp_ctrl_tagged c_tagged;

		/* FPDU headers */
		struct iwarp_rdma_write rwrite;
		struct iwarp_rdma_rreq rreq;
		struct iwarp_rdma_rresp rresp;
		struct iwarp_terminate terminate;
		struct iwarp_send send;
		struct iwarp_send_inv send_inv;

		/* complete short FPDUs */
		struct siw_send_pkt send_pkt;
		struct siw_write_pkt write_pkt;
		struct siw_rreq_pkt rreq_pkt;
		struct siw_rresp_pkt rresp_pkt;
	} pkt;

	struct mpa_trailer trailer;
	/* DDP MSN for untagged messages */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];

	enum siw_tx_ctx state;
	u16 ctrl_len; /* ddp+rdmap hdr */
	u16 ctrl_sent;
	int burst;
	int bytes_unsent; /* ddp payload bytes */

	struct shash_desc *mpa_crc_hd;

	u8 do_crc : 1; /* do crc for segment */
	u8 use_sendpage : 1; /* send w/o copy */
	u8 tx_suspend : 1; /* stop sending DDP segs. */
	u8 pad : 2; /* # pad in current fpdu */
	u8 orq_fence : 1; /* ORQ full or Send fenced */
	u8 in_syscall : 1; /* TX out of user context */
	u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
	u8 gso_seg_limit; /* Maximum segments for GSO, 0 = unbound */

	u16 fpdu_len; /* len of FPDU to tx */
	unsigned int tcp_seglen; /* remaining tcp seg space */

	struct siw_wqe wqe_active;

	int pbl_idx; /* Index into current PBL */
	int sge_idx; /* current sge in tx */
	u32 sge_off; /* already sent in curr. sge */
};

struct siw_qp {
	struct ib_qp base_qp;
	struct siw_device *sdev;
	struct kref ref;
	struct list_head devq;
	int tx_cpu;
	struct siw_qp_attrs attrs;

	struct siw_cep *cep;
	struct rw_semaphore state_lock;

	struct ib_pd *pd;
	struct siw_cq *scq;
	struct siw_cq *rcq;
	struct siw_srq *srq;

	struct siw_iwarp_tx tx_ctx; /* Transmit context */
	spinlock_t sq_lock;
	struct siw_sqe *sendq; /* send queue element array */
	uint32_t sq_get; /* consumer index into sq array */
	uint32_t sq_put; /* kernel prod. index into sq array */
	struct llist_node tx_list;

	struct siw_sqe *orq; /* outbound read queue element array */
	spinlock_t orq_lock;
	uint32_t orq_get; /* consumer index into orq array */
	uint32_t orq_put; /* shared producer index for ORQ */

	struct siw_rx_stream rx_stream;
	struct siw_rx_fpdu *rx_fpdu;
	struct siw_rx_fpdu rx_tagged;
	struct siw_rx_fpdu rx_untagged;
	spinlock_t rq_lock;
	struct siw_rqe *recvq; /* recv queue element array */
	uint32_t rq_get; /* consumer index into rq array */
	uint32_t rq_put; /* kernel prod. index into rq array */

	struct siw_sqe *irq; /* inbound read queue element array */
	uint32_t irq_get; /* consumer index into irq array */
	uint32_t irq_put; /* producer index into irq array */
	int irq_burst;

	struct { /* information to be carried in TERMINATE pkt, if valid */
		u8 valid;
		u8 in_tx;
		u8 layer : 4, etype : 4;
		u8 ecode;
	} term_info;
	struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
	struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
	struct rcu_head rcu;
};

/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
#define rx_wqe(rctx) (&(rctx)->wqe_active)
#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
#define tx_type(wqe) ((wqe)->sqe.opcode)
#define rx_type(wqe) ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)

struct iwarp_msg_info {
	int hdr_len;
	struct iwarp_ctrl ctrl;
	int (*rx_data)(struct siw_qp *qp);
};
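
/*
 * Sketch of how the RX loop can dispatch on the iwarp_pktinfo table
 * (declared below) once a header is complete; the real loop adds
 * validation and CRC handling, and the opcode extraction helper is
 * assumed from iwarp.h:
 *
 *	u8 opcode = __rdmap_get_opcode(&srx->hdr.ctrl);
 *
 *	set_rx_fpdu_context(qp, opcode);
 *	rv = iwarp_pktinfo[opcode].rx_data(qp);
 */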

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;
};

/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
extern const bool loopback_enabled;
extern const bool mpa_crc_required;
extern const bool mpa_crc_strict;
extern const bool siw_tcp_nagle;
extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];

extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];

/* QP general functions */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
		  enum siw_qp_attr_mask mask);
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
void siw_qp_llp_close(struct siw_qp *qp);
void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
void siw_send_terminate(struct siw_qp *qp);

void siw_qp_get_ref(struct ib_qp *qp);
void siw_qp_put_ref(struct ib_qp *qp);
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
void siw_free_qp(struct kref *ref);

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
			u8 etype, u8 ecode, int in_tx);
enum ddp_ecode siw_tagged_error(enum siw_access_state state);
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status);
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status);
void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);

/* QP TX path functions */
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);

/* QP RX path functions */
int siw_proc_send(struct siw_qp *qp);
int siw_proc_rreq(struct siw_qp *qp);
int siw_proc_rresp(struct siw_qp *qp);
int siw_proc_write(struct siw_qp *qp);
int siw_proc_terminate(struct siw_qp *qp);

int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int off, size_t len);

static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
{
	if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
		qp->rx_fpdu = &qp->rx_tagged;
	else
		qp->rx_fpdu = &qp->rx_untagged;

	qp->rx_stream.rdmap_op = opcode;
}

static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
{
	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}

static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
	return container_of(base_qp, struct siw_qp, base_qp);
}

static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
{
	return container_of(base_cq, struct siw_cq, base_cq);
}

static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
{
	return container_of(base_srq, struct siw_srq, base_srq);
}

static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
{
	return container_of(base_dev, struct siw_device, base_dev);
}

static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
{
	return container_of(base_mr, struct siw_mr, base_mr);
}

static inline struct siw_user_mmap_entry *
to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
{
	return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
}

static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (likely(qp && kref_get_unless_zero(&qp->ref))) {
		rcu_read_unlock();
		return qp;
	}
	rcu_read_unlock();
	return NULL;
}
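
/*
 * Typical usage, pairing a successful lookup with siw_qp_put()
 * (defined below) once the caller is done with the reference:
 *
 *	qp = siw_qp_id2obj(sdev, id);
 *	if (qp) {
 *		... use qp ...
 *		siw_qp_put(qp);
 *	}
 */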

static inline u32 qp_id(struct siw_qp *qp)
{
	return qp->base_qp.qp_num;
}

static inline void siw_qp_get(struct siw_qp *qp)
{
	kref_get(&qp->ref);
}

static inline void siw_qp_put(struct siw_qp *qp)
{
	kref_put(&qp->ref, siw_free_qp);
}

static inline int siw_sq_empty(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	return READ_ONCE(sqe->flags) == 0;
}

static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
		return sqe;

	return NULL;
}
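
/*
 * The SQ/RQ/ORQ/IRQ arrays form producer/consumer rings: the producer
 * fills a slot and sets SIW_WQE_VALID in 'flags' last, the consumer
 * clears 'flags' when the slot is done. A hedged sketch of the
 * consumer side (the in-tree code adds barriers and locking):
 *
 *	struct siw_sqe *sqe = sq_get_next(qp);
 *
 *	if (sqe) {
 *		... process the work request ...
 *		WRITE_ONCE(sqe->flags, 0);
 *		qp->sq_get++;
 *	}
 */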

static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
	struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];

	if (READ_ONCE(orq_e->flags) == 0)
		return orq_e;

	return NULL;
}

static inline int siw_orq_empty(struct siw_qp *qp)
{
	return qp->orq[qp->orq_get % qp->attrs.orq_size].flags == 0 ? 1 : 0;
}

static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
{
	struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];

	if (READ_ONCE(irq_e->flags) == 0) {
		qp->irq_put++;
		return irq_e;
	}
	return NULL;
}

static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}

static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
				      int len)
{
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}

static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
	const struct skb_checksum_ops siw_cs_ops = {
		.update = siw_csum_update,
		.combine = siw_csum_combine,
	};
	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);

	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
			     &siw_cs_ops);
	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
}

#define siw_dbg(ibdev, fmt, ...)					\
	ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)

#define siw_dbg_qp(qp, fmt, ...)					\
	ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp),	\
		  __func__, ##__VA_ARGS__)

#define siw_dbg_cq(cq, fmt, ...)					\
	ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id,	\
		  __func__, ##__VA_ARGS__)

#define siw_dbg_pd(pd, fmt, ...)					\
	ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id,		\
		  __func__, ##__VA_ARGS__)

#define siw_dbg_mem(mem, fmt, ...)					\
	ibdev_dbg(&mem->sdev->base_dev,					\
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...)					\
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,		\
		  cep, __func__, ##__VA_ARGS__)

void siw_cq_flush(struct siw_cq *cq);
void siw_sq_flush(struct siw_qp *qp);
void siw_rq_flush(struct siw_qp *qp);
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);

#endif /* _SIW_H */