Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/smc/smc_wr.c:
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) are submitted via ib_post_send to the reliably
 * connected send queue (RC SQ) or via ib_post_recv to the reliably
 * connected receive queue (RC RQ), and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;
	struct smc_wr_tx_pend_priv priv;
	u8			compl_requested;
};

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
							link->wr_tx_cnt) {
		return true;
	}
	return false;
}

/* wait till all pending tx work requests on the given link are completed */
void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
}

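/* Look up the pending send slot that matches a completion's work request id;
 * returns link->wr_tx_cnt if no pending slot matches.
 */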
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

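/* Handle one send completion: wake up a waiting memory registration if the
 * CQE belongs to an IB_WC_REG_MR work request; otherwise record the status
 * for the pending send, invoke its handler, release the slot and wake up
 * waiters for free slots.
 */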
static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
		complete(&link->wr_tx_compl[pnd_snd_idx]);
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

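/* Tasklet (bottom half) for the send CQ: drain up to SMC_WR_MAX_POLL_CQE
 * completions per ib_poll_cq() call, re-arm the CQ notification on the
 * first pass, and poll one more round to catch completions that arrived
 * before the re-arm took effect.
 */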
static void smc_wr_tx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

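/* Claim a free send slot by atomically setting its bit in wr_tx_mask;
 * returns -ENOLINK if the link can no longer send and -EBUSY if all
 * slots are currently in use.
 */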
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_sendable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_sendable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

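/* Return a reserved send slot without posting it, e.g. when message
 * assembly fails; returns 1 if a slot was freed, 0 otherwise.
 */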
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	u32 pnd_idx;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	pnd_idx = pend->idx;
	init_completion(&link->wr_tx_compl[pnd_idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pnd_idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	atomic_inc(&link->wr_reg_refcnt);
	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (atomic_dec_and_test(&link->wr_reg_refcnt))
		wake_up_all(&link->wr_reg_wait);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

/****************************** receive queue ********************************/

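/* Register a receive handler for a message type (LLC or CDC) in the global
 * rx hash table; fails with -EEXIST if the type is already registered.
 */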
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

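/* Process a batch of receive completions: demultiplex successful ones to
 * their type handlers and repost the receive buffer; schedule link-down
 * handling for fatal completion errors.
 */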
static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

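/* Tasklet (bottom half) for the recv CQ; same poll/re-arm/re-poll scheme
 * as smc_wr_tx_tasklet_fn() above.
 */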
static void smc_wr_rx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}

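/* Post an initial receive work request for every receive buffer slot. */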
int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

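/* Cache the queue pair attributes in the link and derive the usable
 * number of send/receive work request slots from the QP capabilities.
 */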
void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

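/* Initialize the scatter/gather elements and work requests for all send,
 * RDMA-write and receive slots, and prepare the memory registration WR.
 */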
static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}

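/* Quiesce a link's work requests: wake up waiters, wait until pending
 * sends and memory registrations have drained, then unmap DMA buffers.
 */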
void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	smc_wr_wakeup_reg_wait(lnk);
	smc_wr_wakeup_tx_wait(lnk);

	smc_wr_tx_wait_no_pending_sends(lnk);
	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

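/* Allocate all per-link work request arrays; receive resources are sized
 * at three times SMC_WR_BUF_CNT, matching smc_wr_remember_qp_attr().
 */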
int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;
	return 0;

no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
		     (unsigned long)smcibdev);
	tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
		     (unsigned long)smcibdev);
}

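/* Map the receive and send buffer arrays for DMA and initialize the
 * per-link work request state; on failure the rx mapping is undone.
 */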
int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	atomic_set(&lnk->wr_tx_refcnt, 0);
	init_waitqueue_head(&lnk->wr_reg_wait);
	atomic_set(&lnk->wr_reg_refcnt, 0);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}