Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/gfp.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include "wr.h"

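/* Translation table from IB work-request opcodes to mlx5 hardware send
 * opcodes. Opcodes without an entry are left zero-initialized by the
 * designated initializers below.
 */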
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};

/* handle_post_send_edge - Check if we have reached the SQ edge. If so, update
 * to the next nearby edge and get a new address translation for the current
 * WQE position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	if (likely(*seg != *cur_edge))
		return;

	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
 * pointers. At the end @seg is aligned to 16B regardless of the copied size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
				   void **seg, u32 *wqe_sz, const void *src,
				   size_t n)
{
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}

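/* mlx5_wq_overflow - check whether posting @nreq more work requests would
 * overflow the work queue. The head/tail distance is re-read under the CQ
 * lock so the check observes any tail advance made by concurrent completion
 * processing before declaring overflow.
 */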
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq,
			    struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned int cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

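/* set_eth_seg - build the Ethernet segment: checksum offload flags and, for
 * LSO, the MSS plus as much of the packet header as fits inline. The inline
 * header may cross the SQ fragment edge, in which case the remainder is
 * copied via memcpy_send_wqe().
 */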
static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			void **seg, int *size, void **cur_edge)
{
	struct mlx5_wqe_eth_seg *eseg = *seg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		size_t left, copysz;
		void *pdata = ud_wr->header;
		size_t stride;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/* memcpy_send_wqe should get a 16B-aligned address. Hence, we
		 * first copy up to the current edge and then, if needed,
		 * continue in memcpy_send_wqe.
		 */
		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
			       left);
		memcpy(eseg->inline_hdr.start, pdata, copysz);
		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
			       sizeof(eseg->inline_hdr.start) + copysz, 16);
		*size += stride / 16;
		*seg += stride;

		if (copysz < left) {
			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
			left -= copysz;
			pdata += copysz;
			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
					left);
		}

		return;
	}

	*seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
}

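/* set_datagram_seg - copy the UD address vector from the AH and pack the
 * remote QPN together with the MLX5_EXTENDED_UD_AV flag into dqp_dct.
 */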
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     const struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct =
		cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

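/* get_xlt_octo - translation-table size in 16-byte octowords, rounded up to
 * the UMR alignment. E.g. (assuming the v5.10 constants
 * MLX5_IB_UMR_XLT_ALIGNMENT == 64 and MLX5_IB_UMR_OCTOWORD == 16):
 * get_xlt_octo(80) == ALIGN(80, 64) / 16 == 128 / 16 == 8.
 */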
static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

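/* Mkey-mask builders: each mask selects which memory-key fields a UMR WQE
 * is allowed to modify. frwr_mkey_mask() covers fast-registration work
 * requests; the atomic bit is requested only when the caller needs atomic
 * access.
 */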
static __be64 frwr_mkey_mask(bool atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;

	memset(umr, 0, sizeof(*umr));

	umr->flags = flags;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask(atomic);
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
	umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(int atomic,
					 int relaxed_ordering_write,
					 int relaxed_ordering_read)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	if (relaxed_ordering_write)
		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;

	if (relaxed_ordering_read)
		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}

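/* umr_check_mkey_mask - reject mkey masks that touch fields the device
 * firmware has declared non-modifiable via UMR (entity size, atomic access,
 * relaxed ordering), returning -EPERM in that case.
 */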
static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return -EPERM;

	if (mask & MLX5_MKEY_MASK_A &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return -EPERM;

	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return -EPERM;

	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return -EPERM;

	return 0;
}

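/* set_reg_umr_segment - fill the UMR control segment for a registration work
 * request: free-state checking policy, XLT size/offset, and the mkey mask
 * assembled from the MLX5_IB_SEND_UMR_* send flags. The final mask is then
 * validated against device capabilities.
 */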
static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
			       struct mlx5_wqe_umr_ctrl_seg *umr,
			       const struct ib_send_wr *wr)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (!umrwr->ignore_free_state) {
		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
			 /* fail if free */
			umr->flags = MLX5_UMR_CHECK_FREE;
		else
			/* fail if not free */
			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	}

	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		u64 offset = get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(
			!!(MLX5_CAP_GEN(dev->mdev, atomic)),
			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)),
			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)));
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;

	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
}

static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		seg->log2_page_size = ilog2(mr->ibmr.page_size);
	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		/* KLMs take twice the size of MTTs */
		ndescs *= 2;

	seg->flags = get_umr_flags(access) | mr->access_mode;
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
}

static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}

static void set_reg_mkey_segment(struct mlx5_ib_dev *dev,
				 struct mlx5_mkey_seg *seg,
				 const struct ib_send_wr *wr)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		MLX5_SET(mkc, seg, free, 1);

	MLX5_SET(mkc, seg, a,
		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, seg, rw,
		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, seg, lr, 1);
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		MLX5_SET(mkc, seg, relaxed_ordering_write,
			 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		MLX5_SET(mkc, seg, relaxed_ordering_read,
			 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));

	if (umrwr->pd)
		MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn);
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
	    !umrwr->length)
		MLX5_SET(mkc, seg, length64, 1);

	MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr);
	MLX5_SET64(mkc, seg, len, umrwr->length);
	MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift);
	MLX5_SET(mkc, seg, qpn, 0xffffff);
	MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey));
}

static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}

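/* send_ieth - value for the immediate/invalidation dword of the control
 * segment: immediate data for the *_WITH_IMM opcodes, the rkey to invalidate
 * for SEND_WITH_INV, zero otherwise.
 */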
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

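/* calc_sig - complement of the byte-wise XOR over @size bytes of the WQE;
 * used as a simple integrity signature for work queue entries. wq_sig()
 * below derives the number of bytes to cover from the WQE itself.
 */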
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}

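/* set_data_inl_seg - copy all scatter/gather entries inline into the WQE,
 * crossing SQ fragment edges as needed. Fails with -ENOMEM if the total
 * exceeds the QP's max_inline_data.
 */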
static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
			    void **wqe, int *wqe_sz, void **cur_edge)
{
	struct mlx5_wqe_inline_seg *seg;
	size_t offset;
	int inl = 0;
	int i;

	seg = *wqe;
	*wqe += sizeof(*seg);
	offset = sizeof(*seg);

	for (i = 0; i < wr->num_sge; i++) {
		size_t len  = wr->sg_list[i].length;
		void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);

		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		while (likely(len)) {
			size_t leftlen;
			size_t copysz;

			handle_post_send_edge(&qp->sq, wqe,
					      *wqe_sz + (offset >> 4),
					      cur_edge);

			leftlen = *cur_edge - *wqe;
			copysz = min_t(size_t, leftlen, len);

			memcpy(*wqe, addr, copysz);
			len -= copysz;
			addr += copysz;
			*wqe += copysz;
			offset += copysz;
		}
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}

static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

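/* bs_selector - map a protection-information block size in bytes to the
 * device's block-size selector encoding; unsupported sizes map to 0.
 */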
static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}

static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}

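/* mlx5_set_bsf - build the BSF section describing the memory and wire
 * signature domains for T10-DIF offload, including the copy-byte mask used
 * when both domains share the same block structure.
 */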
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector =
				bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

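/* set_sig_data_segment - lay out the data section of a signature WQE. When
 * data and protection are interleaved (or no separate protection exists), a
 * single KLM plus BSF is emitted; otherwise a strided-block descriptor pair
 * maps data and protection separately, as drawn in the comments below.
 */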
static int set_sig_data_segment(const struct ib_send_wr *send_wr,
				struct ib_mr *sig_mr,
				struct ib_sig_attrs *sig_attrs,
				struct mlx5_ib_qp *qp, void **seg, int *size,
				void **cur_edge)
{
	struct mlx5_bsf *bsf;
	u32 data_len;
	u32 data_key;
	u64 data_va;
	u32 prot_len = 0;
	u32 prot_key = 0;
	u64 prot_va = 0;
	bool prot = false;
	int ret;
	int wqe_size;
	struct mlx5_ib_mr *mr = to_mmr(sig_mr);
	struct mlx5_ib_mr *pi_mr = mr->pi_mr;

	data_len = pi_mr->data_length;
	data_key = pi_mr->ibmr.lkey;
	data_va = pi_mr->data_iova;
	if (pi_mr->meta_ndescs) {
		prot_len = pi_mr->meta_length;
		prot_key = pi_mr->ibmr.lkey;
		prot_va = pi_mr->pi_iova;
		prot = true;
	}

	if (!prot || (data_key == prot_key && data_va == prot_va &&
		      data_len == prot_len)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So we need to construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information,
		 * so we need to construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 				 struct ib_mr *sig_mr, int access_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 				 u32 size, u32 length, u32 pdn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	u32 sig_key = sig_mr->rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	memset(seg, 0, sizeof(*seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				    MLX5_MKEY_BSF_EN | pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	seg->len = cpu_to_be64(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) }
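
qpn_mkey7_0 above packs the signature mkey's variable "key tag" byte into
bits 7:0 and fills the QPN field with all-ones, which (our reading of the
mkey context layout, stated here as an assumption) marks the mkey as not
bound to a single QP. A self-contained sketch of the packing:

#include <assert.h>
#include <stdint.h>

/* pack_qpn_mkey7_0 is a hypothetical helper, not a driver symbol */
static uint32_t pack_qpn_mkey7_0(uint32_t rkey)
{
	return (rkey & 0xff) | 0xffffff00;
}

int main(void)
{
	assert(pack_qpn_mkey7_0(0x12345678) == 0xffffff78);
	return 0;
}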
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	memset(umr, 0, sizeof(*umr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	umr->mkey_mask = sig_mkey_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			 struct mlx5_ib_qp *qp, void **seg, int *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			 void **cur_edge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	const struct ib_reg_wr *wr = reg_wr(send_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	u32 xlt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	int region_len, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	if (unlikely(send_wr->num_sge != 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	    unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	    unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	    unlikely(!sig_mr->sig->sig_status_checked))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	/* length of the protected region, data + protection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	region_len = pi_mr->ibmr.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	 * KLM octoword size - if protection was provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	 * then we use strided block format (3 octowords),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * else we use single KLM (1 octoword)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		xlt_size = 0x30; /* 3 octowords, 48 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		xlt_size = sizeof(struct mlx5_klm); /* 1 octoword, 16 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	set_sig_umr_segment(*seg, xlt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			     pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	*seg += sizeof(struct mlx5_mkey_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	*size += sizeof(struct mlx5_mkey_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				   cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	sig_mr->sig->sig_status_checked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) static int set_psv_wr(struct ib_sig_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		      u32 psv_idx, void **seg, int *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct mlx5_seg_set_psv *psv_seg = *seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	memset(psv_seg, 0, sizeof(*psv_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	psv_seg->psv_num = cpu_to_be32(psv_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	switch (domain->sig_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	case IB_SIG_TYPE_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	case IB_SIG_TYPE_T10_DIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 						     domain->sig.dif.app_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		pr_err("Bad signature type (%d) given\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		       domain->sig_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	*seg += sizeof(*psv_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	*size += sizeof(*psv_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
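
For the T10-DIF domain, set_psv_wr() above packs the block guard into the
upper 16 bits of transient_sig and the application tag into the lower 16,
with the reference tag carried in its own 32-bit word. A self-contained
sketch of that packing (t10dif_transient_sig is a hypothetical helper):

#include <assert.h>
#include <stdint.h>

static uint32_t t10dif_transient_sig(uint16_t bg, uint16_t app_tag)
{
	return ((uint32_t)bg << 16) | app_tag;
}

int main(void)
{
	assert(t10dif_transient_sig(0xabcd, 0x1234) == 0xabcd1234);
	return 0;
}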
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static int set_reg_wr(struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		      const struct ib_reg_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		      void **seg, int *size, void **cur_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		      bool check_not_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	u8 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	/* Matches access in mlx5_set_umr_free_mkey() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		mlx5_ib_warn(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			to_mdev(qp->ibqp.device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			"Fast update for MR access flags is not possible\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			     "Invalid IB_SEND_INLINE send flag\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (check_not_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		flags |= MLX5_UMR_CHECK_NOT_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	if (umr_inline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		flags |= MLX5_UMR_INLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	set_reg_umr_seg(*seg, mr, flags, atomic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	*seg += sizeof(struct mlx5_mkey_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	*size += sizeof(struct mlx5_mkey_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (umr_inline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 				mr_list_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		set_reg_data_seg(*seg, mr, pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		*seg += sizeof(struct mlx5_wqe_data_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
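
set_reg_wr() either copies a small descriptor list inline into the SQ or
emits a data segment pointing at it. A hedged sketch of that decision;
THRESHOLD and the 16-byte descriptor size are illustrative stand-ins for
MLX5_IB_SQ_UMR_INLINE_THRESHOLD and the real desc_size:

#include <stdbool.h>
#include <stdio.h>

#define THRESHOLD	64	/* stand-in value, not the driver constant */

static bool umr_goes_inline(int ndescs, int meta_ndescs, int desc_size)
{
	return (ndescs + meta_ndescs) * desc_size <= THRESHOLD;
}

int main(void)
{
	printf("4 descs inline? %d\n", umr_goes_inline(4, 0, 16)); /* 1 */
	printf("8 descs inline? %d\n", umr_goes_inline(8, 0, 16)); /* 0 */
	return 0;
}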
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			void **cur_edge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	set_linv_umr_seg(*seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	set_linv_mkey_seg(*seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	*seg += sizeof(struct mlx5_mkey_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	*size += sizeof(struct mlx5_mkey_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	__be32 *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	pr_debug("dump WQE index %u:\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		if ((i & 0xf) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			pr_debug("WQBB at %p:\n", (void *)p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			 be32_to_cpu(p[j + 3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		       struct mlx5_wqe_ctrl_seg **ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		       const struct ib_send_wr *wr, unsigned int *idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		       int *size, void **cur_edge, int nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		       bool send_signaled, bool solicited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	*seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	*ctrl = *seg;
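	/* Zero the ctrl segment's third dword (signature and fm_ce_se bytes). */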
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	*(uint32_t *)(*seg + 8) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	(*ctrl)->imm = send_ieth(wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		(send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		(solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	*seg += sizeof(**ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	*size = sizeof(**ctrl) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	*cur_edge = qp->sq.cur_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		     struct mlx5_wqe_ctrl_seg **ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		     const struct ib_send_wr *wr, unsigned int *idx, int *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		     void **cur_edge, int nreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			   wr->send_flags & IB_SEND_SIGNALED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			   wr->send_flags & IB_SEND_SOLICITED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static void finish_wqe(struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		       struct mlx5_wqe_ctrl_seg *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		       void *seg, u8 size, void *cur_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		       unsigned int idx, u64 wr_id, int nreq, u8 fence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		       u32 mlx5_opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	u8 opmod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 					     mlx5_opcode | ((u32)opmod << 24));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	ctrl->fm_ce_se |= fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		ctrl->signature = wq_sig(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	qp->sq.wrid[idx] = wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	qp->sq.w_list[idx].opcode = mlx5_opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	qp->sq.w_list[idx].next = qp->sq.cur_post;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	/* Save the edge, which may have been updated during WQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	 * construction, into the SQ's cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			  get_sq_edge(&qp->sq, qp->sq.cur_post &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 				      (qp->sq.wqe_cnt - 1)) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			  cur_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
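
finish_wqe() advances cur_post in 64-byte WQE basic blocks while size is
carried in 16-byte units, and every ring index wraps with a power-of-two
mask. A self-contained sketch of both conventions:

#include <assert.h>

#define WQE_BB			64	/* MLX5_SEND_WQE_BB */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cur_post = 0, wqe_cnt = 128;	/* wqe_cnt: power of two */

	cur_post += DIV_ROUND_UP(7 * 16, WQE_BB);	/* 112 bytes -> 2 WQEBBs */
	assert(cur_post == 2);
	assert((cur_post & (wqe_cnt - 1)) == 2);	/* "& (wqe_cnt - 1)" wrap */
	return 0;
}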
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	*seg += sizeof(struct mlx5_wqe_raddr_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	*size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			     struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			     int *size, void **cur_edge, unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	(*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	set_linv_wr(qp, seg, size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			 void **cur_edge, unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	qp->sq.wr_data[idx] = IB_WR_REG_MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	(*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		      const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		      struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		      void **cur_edge, unsigned int *idx, int nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		      struct ib_sig_domain *domain, u32 psv_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		      u8 next_fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * SET_PSV WQEs are posted unsignaled (a completion is generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * only on error) and with the solicited flag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			  false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	err = set_psv_wr(domain, psv_index, seg, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		   next_fence, MLX5_OPCODE_SET_PSV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				   struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				   const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				   struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				   int *size, void **cur_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				   unsigned int *idx, int nreq, u8 fence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 				   u8 next_fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	struct mlx5_ib_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	struct mlx5_ib_mr *pi_mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct mlx5_ib_mr pa_pi_mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	struct ib_sig_attrs *sig_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct ib_reg_wr reg_pi_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	mr = to_mmr(reg_wr(wr)->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	pi_mr = mr->pi_mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (pi_mr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		memset(&reg_pi_wr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		       sizeof(struct ib_reg_wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		reg_pi_wr.mr = &pi_mr->ibmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		reg_pi_wr.access = reg_wr(wr)->access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		reg_pi_wr.key = pi_mr->ibmr.rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		(*ctrl)->imm = cpu_to_be32(reg_pi_wr.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		/* UMR for data + prot registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			   nreq, fence, MLX5_OPCODE_UMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		/* No UMR, use local_dma_lkey */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		pa_pi_mr.ndescs = mr->ndescs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		pa_pi_mr.data_length = mr->data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		pa_pi_mr.data_iova = mr->data_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		if (mr->meta_ndescs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			pa_pi_mr.meta_ndescs = mr->meta_ndescs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			pa_pi_mr.meta_length = mr->meta_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			pa_pi_mr.pi_iova = mr->pi_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		pa_pi_mr.ibmr.length = mr->ibmr.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		mr->pi_mr = &pa_pi_mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	(*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	/* UMR for sig MR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		   fence, MLX5_OPCODE_UMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	sig_attrs = mr->ibmr.sig_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			 &sig_attrs->mem, mr->sig->psv_memory.psv_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			 next_fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			 &sig_attrs->wire, mr->sig->psv_wire.psv_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			 next_fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
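
With a dedicated pi_mr, the function above emits four chained WQEs before
normal posting resumes. A descriptive sketch of that chain (the strings
are editorial summaries, not driver symbols):

#include <stdio.h>

static const char * const reg_mr_integrity_chain[] = {
	"UMR     - register data + protection under pi_mr",
	"UMR     - register the signature mkey (BSF enabled)",
	"SET_PSV - memory signature domain",
	"SET_PSV - wire signature domain",
};

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		puts(reg_mr_integrity_chain[i]);
	return 0;
}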
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 			 const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			 void **cur_edge, unsigned int *idx, int nreq, u8 fence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			 u8 next_fence, int *num_sge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		handle_rdma_op(wr, seg, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	case IB_WR_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	case IB_WR_ATOMIC_FETCH_AND_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		*num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		*num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	case IB_WR_REG_MR_INTEGRITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					      cur_edge, idx, nreq, fence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 					      next_fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		*num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		handle_rdma_op(wr, seg, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			      const struct ib_send_wr *wr, void **seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			      int *size, void **cur_edge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	set_datagram_seg(*seg, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	*seg += sizeof(struct mlx5_wqe_datagram_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	*size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			  void **seg, int *size, void **cur_edge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	set_datagram_seg(*seg, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	*seg += sizeof(struct mlx5_wqe_datagram_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	*size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* Handle a QP created with IPoIB UD LSO offload. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		struct mlx5_wqe_eth_pad *pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		pad = *seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		*seg += sizeof(struct mlx5_wqe_eth_pad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		*size += sizeof(struct mlx5_wqe_eth_pad) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		set_eth_seg(wr, qp, seg, size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			      const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			      struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			      int *size, void **cur_edge, unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	(*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	err = set_reg_umr_segment(dev, *seg, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	set_reg_mkey_segment(dev, *seg, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	*seg += sizeof(struct mlx5_mkey_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	*size += sizeof(struct mlx5_mkey_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		      const struct ib_send_wr **bad_wr, bool drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* silence "may be used uninitialized" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	struct mlx5_core_dev *mdev = dev->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	struct mlx5_ib_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	struct mlx5_wqe_xrc_seg *xrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	struct mlx5_bf *bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	void *cur_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	int num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	void *seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	int nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	u8 next_fence = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	u8 fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		     !drain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	bf = &qp->bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	spin_lock_irqsave(&qp->sq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	for (nreq = 0; wr; nreq++, wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		num_sge = wr->num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		if (unlikely(num_sge > qp->sq.max_gs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 				nreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		if (wr->opcode == IB_WR_REG_MR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		    wr->opcode == IB_WR_REG_MR_INTEGRITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			fence = dev->umr_fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			if (wr->send_flags & IB_SEND_FENCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 				if (qp->next_fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 					fence = MLX5_FENCE_MODE_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 				fence = qp->next_fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		switch (ibqp->qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		case IB_QPT_XRC_INI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			xrc = seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			seg += sizeof(*xrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			size += sizeof(*xrc) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 					    &cur_edge, &idx, nreq, fence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 					    next_fence, &num_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 				*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			} else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 				goto skip_psv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			handle_qpt_uc(wr, &seg, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		case IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				mlx5_ib_warn(dev, "Sending SMP MADs is not allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 				err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		case MLX5_IB_QPT_HW_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		case IB_QPT_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		case MLX5_IB_QPT_REG_UMR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 						       &size, &cur_edge, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 				mlx5_ib_warn(dev, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 				*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			for (i = 0; i < num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 				handle_post_send_edge(&qp->sq, &seg, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 						      &cur_edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 				if (unlikely(!wr->sg_list[i].length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				set_data_ptr_seg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 					(struct mlx5_wqe_data_seg *)seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 					wr->sg_list + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 				size += sizeof(struct mlx5_wqe_data_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 				seg += sizeof(struct mlx5_wqe_data_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		qp->next_fence = next_fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			   fence, mlx5_ib_opcode[wr->opcode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) skip_psv:
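		/* Compile-time debug toggle: flip to "if (1)" to dump each WQE. */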
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		if (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			dump_wqe(qp, idx, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	if (likely(nreq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		qp->sq.head += nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		/* Make sure that descriptors are written before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		 * updating doorbell record and ringing the doorbell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		/* Make sure doorbell record is visible to the HCA before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		 * we hit doorbell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		/* Make sure doorbells don't leak out of SQ spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		 * and reach the HCA out of order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		 */
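		/* Alternate between the two halves of the BlueFlame buffer. */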
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		bf->offset ^= bf->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	spin_unlock_irqrestore(&qp->sq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
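
mlx5_ib_post_send() is reached through the core verbs entry point. A
hedged sketch of a consumer posting one signaled RDMA WRITE; qp, dma_addr,
lkey, remote_addr and rkey are placeholders the caller must own, and error
handling is abbreviated:

#include <rdma/ib_verbs.h>

static int post_one_rdma_write(struct ib_qp *qp, u64 dma_addr, u32 lkey,
			       u64 remote_addr, u32 rkey, u32 len)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_rdma_wr wr = {
		.wr = {
			.wr_id		= 1,
			.sg_list	= &sge,
			.num_sge	= 1,
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
		},
		.remote_addr	= remote_addr,
		.rkey		= rkey,
	};
	const struct ib_send_wr *bad_wr;

	/* For an mlx5 QP this lands in mlx5_ib_post_send() above. */
	return ib_post_send(qp, &wr.wr, &bad_wr);
}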
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void set_sig_seg(struct mlx5_rwqe_sig *sig, int max_gs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	sig->signature = calc_sig(sig, (max_gs + 1) << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		      const struct ib_recv_wr **bad_wr, bool drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	struct mlx5_wqe_data_seg *scat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	struct mlx5_rwqe_sig *sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	struct mlx5_core_dev *mdev = dev->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	int nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	int ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		     !drain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	spin_lock_irqsave(&qp->rq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	for (nreq = 0; wr; nreq++, wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			scat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		for (i = 0; i < wr->num_sge; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			set_data_ptr_seg(scat + i, wr->sg_list + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		if (i < qp->rq.max_gs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			scat[i].byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			scat[i].addr       = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			sig = (struct mlx5_rwqe_sig *)scat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			set_sig_seg(sig, qp->rq.max_gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		qp->rq.wrid[ind] = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (likely(nreq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		qp->rq.head += nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		/* Make sure that descriptors are written before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		 * doorbell record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	spin_unlock_irqrestore(&qp->rq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
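
The receive side is reached the same way. A hedged sketch of posting one
receive buffer; dma_addr and lkey are placeholders the caller must own:

#include <rdma/ib_verbs.h>

static int post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 lkey, u32 len)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id		= 2,
		.sg_list	= &sge,
		.num_sge	= 1,
	};
	const struct ib_recv_wr *bad_wr;

	/* For an mlx5 QP this lands in mlx5_ib_post_recv() above. */
	return ib_post_recv(qp, &wr, &bad_wr);
}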