Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX       0x80
#define RVT_KDETH_QP_SUFFIX       0xffff
#define RVT_KDETH_QP_PREFIX_MASK  0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE         (u32)(RVT_KDETH_QP_PREFIX << \
					RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX          (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
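
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): how a 24-bit QPN could be classified against the KDETH
 * prefix defined above.
 */
static inline bool rvt_qpn_is_kdeth_sketch(u32 qpn)
{
	/* true when QP[23:16] == RVT_KDETH_QP_PREFIX (0x80) */
	return (qpn & RVT_KDETH_QP_PREFIX_MASK) == RVT_KDETH_QP_BASE;
}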

/*
 * If a packet's LNH == BTH and the DEST QPN[23:16] bits in the BTH match
 * this prefix value, then it is an AIP packet with a DETH containing the
 * entropy value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX       0x81
#define RVT_AIP_QP_SUFFIX       0xffff
#define RVT_AIP_QP_PREFIX_MASK  0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE         (u32)(RVT_AIP_QP_PREFIX << \
				      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX         BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX          (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs signal completion only when
 *                       requested
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */
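
/*
 * Illustrative sketch: a driver-private flag allocated per the rule above,
 * from bit 31 downward (this define is hypothetical, shown only as an
 * example).
 */
#define EXAMPLE_DRIVER_S_FLAG	BIT(31)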

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
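
/*
 * Illustrative sketch (hypothetical helper): send-side progress code
 * typically tests these masks under s_lock before attempting any work.
 */
static inline bool rvt_s_flags_blocked_sketch(u32 s_flags)
{
	/* any set wait bit means nothing can be sent right now */
	return !!(s_flags & RVT_S_ANY_WAIT);
}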

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work request plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};
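
/*
 * Illustrative compile-time check of the ordering rule above (hypothetical;
 * static_assert would come from <linux/build_bug.h>):
 *
 *	static_assert(offsetof(struct rvt_ud_wr, wr) == 0,
 *		      "rvt_post_one_wr copies assuming wr is first");
 */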

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel receive work request queue
 * @p_lock: lock to protect the producer side of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect the consumer side of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @wq: the receive work request queue entries
 *
 * This structure holds the head pointer, tail pointer and
 * receive work queue entries for kernel-mode users.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here. */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/**
 * rvt_get_rq_count - count the number of receive work queue entries
 * in the circular buffer
 * @rq: the receive queue
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return: total number of entries in the receive queue
 */
static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;

	if ((s32)count < 0)
		count += rq->size;
	return count;
}
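
/*
 * Worked example (illustrative): with rq->size == 256, head == 2 and
 * tail == 250, head - tail underflows to -248 when viewed as an s32, so
 * rq->size is added back and the count is the expected 8 entries
 * (indices 250..255 plus 0..1).
 */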

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that each
 * driver can support its own, potentially differing,
 * set of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

/*
 * Common variables are protected by both r_rq.lock and s_lock, in that
 * order, which only happens in modify_qp() or when changing the QP state.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* linked list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;         /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    IB_QPN_MASK

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}
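
/*
 * Illustrative sketch (hypothetical helper): typical use is to fetch an
 * entry by its ring index, e.g. the next entry to process at qp->s_tail.
 */
static inline struct rvt_swqe *rvt_tail_swqe_sketch(struct rvt_qp *qp)
{
	return rvt_get_swqe_ptr(qp, qp->s_tail);
}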

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user-mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * that a wqe uses a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the s_last
 * ring index update and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
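
/*
 * Worked example (illustrative): the shift left by 8 discards the top
 * 8 bits before the sign is examined, so only the low 24 bits take part
 * in the comparison.  For a == 0x000001 and b == 0xffffff,
 * (1 - 0xffffff) << 8 == 0x200 > 0, treating a as one past b modulo 2^24.
 */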

__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}
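
/*
 * Worked example (illustrative): with pmtu == 4096 (log_pmtu == 12) and
 * len == 10000, (10000 + 4095) >> 12 == 3, i.e. three packets are needed.
 */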

/**
 * rvt_div_mtu - shift based mtu divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
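
/*
 * Worked example (illustrative): the IBTA local ACK timeout is defined as
 * 4.096 us * 2^timeout.  For timeout == 14, 1U << 14 == 16384 us, and
 * scaling by 4096 / 1000 yields roughly 67 ms worth of jiffies.
 */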

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
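
/*
 * Illustrative usage sketch (hypothetical caller): the RCU read lock must
 * cover every use of the returned QP.
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		... use qp, e.g. deliver the packet ...
 *	rcu_read_unlock();
 */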
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718)  * rvt_mod_retry_timer - mod a retry timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  * @qp - the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  * @shift - timeout shift to wait for multiple packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  * Modify a potentially already running retry timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct ib_qp *ibqp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	qp->s_flags |= RVT_S_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	/* 4.096 usec. * (1 << qp->timeout) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		  (qp->timeout_jiffies << shift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	return rvt_mod_retry_timer_ext(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * rvt_put_qp_swqe - drop refs held by swqe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * @qp: the send qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * @wqe: the send wqe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * This drops any references held by the swqe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	rvt_put_swqe(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	if (qp->allowed_ops == IB_OPCODE_UD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		rdma_destroy_ah_attr(wqe->ud_wr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * rvt_qp_swqe_incr - increment ring index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * @qp: the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * @val: the starting value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * Return: the new value wrapping as appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	if (++val >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) }
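^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  * Example (sketch): walking the send queue from s_last toward s_head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  * with the wrapping increment; process_wqe() is hypothetical and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  * rvt_get_swqe_ptr() is assumed available to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  *	u32 i = qp->s_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  *	while (i != qp->s_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  *		process_wqe(rvt_get_swqe_ptr(qp, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  *		i = rvt_qp_swqe_incr(qp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  */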
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  * rvt_recv_cq - add a new entry to the completion queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  *			used by the receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  * @qp: receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  * @wc: work completion entry to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * @solicited: true if @wc is solicited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  * This is a wrapper for calling rvt_cq_enter() from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * receive queue. If rvt_cq_enter() returns false, the cq is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * full and the qp is put into the error state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			       bool solicited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) }
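^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  * Example (sketch): after copying payload into the posted receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * buffer, a driver might retire it like this; "len" and "solicited"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  * are assumed to be computed by the caller from the incoming packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  *	struct ib_wc wc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  *		.wr_id = qp->r_wr_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  *		.status = IB_WC_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  *		.opcode = IB_WC_RECV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  *		.qp = &qp->ibqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  *		.byte_len = len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  *	rvt_recv_cq(qp, &wc, solicited);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  */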
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  * rvt_send_cq - add a new entry to the completion queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  *                        used by the send queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  * @qp: send queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  * @wc: work completion entry to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  * @solicited: true if @wc is solicited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * This is a wrapper for calling rvt_cq_enter() from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * send queue. If rvt_cq_enter() returns false, the cq is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  * full and the qp is put into the error state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			       bool solicited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * rvt_qp_complete_swqe - insert send completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * @qp: the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * @wqe: the send wqe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  * @opcode: wc operation (driver dependent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * @status: completion status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * Update the s_last information, and then insert a send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  * completion into the completion queue if the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  * indicates it should be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  * See IBTA 10.7.3.1 for info on completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * Return: new last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) rvt_qp_complete_swqe(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		     struct rvt_swqe *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		     enum ib_wc_opcode opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		     enum ib_wc_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	bool need_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	u64 wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	u32 byte_len, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	int flags = wqe->wr.send_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	rvt_qp_wqe_unreserve(qp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	rvt_put_qp_swqe(qp, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	need_completion =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		!(flags & RVT_SEND_RESERVE_USED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		(flags & IB_SEND_SIGNALED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		status != IB_WC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (need_completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		wr_id = wqe->wr.wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		byte_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		/* above fields required before writing s_last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	last = rvt_qp_swqe_incr(qp, qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	/* see rvt_qp_is_avail() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	smp_store_release(&qp->s_last, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (need_completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		struct ib_wc w = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			.wr_id = wr_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			.status = status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			.opcode = opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			.qp = &qp->ibqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			.byte_len = byte_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	return last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) }
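^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * Example (sketch): a driver's ACK handler, holding qp->s_lock, might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * retire the oldest in-flight WQE like this; rvt_get_swqe_ptr() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * assumed available and IB_WC_SEND stands in for the driver's own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * opcode mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  *	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  *	rvt_qp_complete_swqe(qp, wqe, IB_WC_SEND, IB_WC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  */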
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) extern const int ib_rvt_state_ops[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) struct rvt_dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) void rvt_comm_est(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) unsigned long rvt_rnr_tbl_to_usec(u32 index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) void rvt_del_timers_sync(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) void rvt_stop_rc_timers(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) static inline void rvt_add_retry_timer(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	rvt_add_retry_timer_ext(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		  void *data, u32 length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		  bool release, bool copy_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		       enum ib_wc_status status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) void rvt_ruc_loopback(struct rvt_qp *qp);
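^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * Example (sketch): an RDMA WRITE receive path might copy one MTU of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  * payload into the QP's target SGE state; "data" and "pmtu" are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * assumed to come from the incoming packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  *	rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  */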
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * struct rvt_qp_iter - the iterator for QPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  * @qp: the current QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * This structure defines the current iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * state for sequenced access to all QPs relative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * to an rvt_dev_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) struct rvt_qp_iter {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	struct rvt_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/* private: backpointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct rvt_dev_info *rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	/* private: callback routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	void (*cb)(struct rvt_qp *qp, u64 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	/* private: for arg to callback routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	/* private: number of SMI,GSI QPs for device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	int specials;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	/* private: current iterator index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  * ib_cq_tail - Return tail index of cq buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  * @send_cq: the cq for send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  * This is called in qp_iter_print to get tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  * of cq buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static inline u32 ib_cq_tail(struct ib_cq *send_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	return cq->ip ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	       cq->kqueue->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * ib_cq_head - Return head index of cq buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * @send_cq: the cq for send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * This is called in qp_iter_print to get head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * of cq buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) static inline u32 ib_cq_head(struct ib_cq *send_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	return cq->ip ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	       cq->kqueue->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) }
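^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * Example (sketch): head and tail can be compared to see whether any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * completions are pending; equal indices mean the ring is empty, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * a poller might simply return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  *	if (ib_cq_head(send_cq) == ib_cq_tail(send_cq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  *		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  */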
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * rvt_free_rq - free memory allocated for rvt_rq struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * @rq: request queue data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * This function should only be called if rvt_mmap_info()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  * has not succeeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static inline void rvt_free_rq(struct rvt_rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	kvfree(rq->kwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	rq->kwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	vfree(rq->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	rq->wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * rvt_to_iport - Get the ibport pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * @qp: the qp pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * This function returns the ibport pointer from the qp pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	return rdi->ports[qp->port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * rvt_rc_credit_avail - Check if there are enough RC credits for the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * @qp: the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * @wqe: the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  * This function returns false when there are not enough credits for the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * request and true otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		struct rvt_ibport *rvp = rvt_to_iport(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		rvp->n_rc_crwaits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
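^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * Example (sketch): a send engine typically gates each new RC request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * on credits before building a packet; "bail" is a hypothetical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * error/exit label in the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  *	if (!rvt_rc_credit_avail(qp, wqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  *		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  */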
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 				     u64 v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 				     void (*cb)(struct rvt_qp *qp, u64 v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) int rvt_qp_iter_next(struct rvt_qp_iter *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) void rvt_qp_iter(struct rvt_dev_info *rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		 u64 v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		 void (*cb)(struct rvt_qp *qp, u64 v));
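^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * Example (sketch): dumping every QP on a device via the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * iterator; print_qp() is hypothetical.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  *	static void print_qp(struct rvt_qp *qp, u64 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  *		pr_info("qpn 0x%x\n", qp->ibqp.qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  *	rvt_qp_iter(rdi, 0, print_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 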
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #endif          /* DEF_RDMAVT_INCQP_H */