/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */

#ifndef SMC_WR_H
#define SMC_WR_H

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_core.h"

#define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */

#define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)

#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */

#define SMC_WR_TX_PEND_PRIV_SIZE 32

struct smc_wr_tx_pend_priv {
	u8			priv[SMC_WR_TX_PEND_PRIV_SIZE];
};

typedef void (*smc_wr_tx_handler)(struct smc_wr_tx_pend_priv *,
				  struct smc_link *,
				  enum ib_wc_status);

typedef bool (*smc_wr_tx_filter)(struct smc_wr_tx_pend_priv *,
				 unsigned long);

typedef void (*smc_wr_tx_dismisser)(struct smc_wr_tx_pend_priv *);

struct smc_wr_rx_handler {
	struct hlist_node	list;	/* hash table collision resolution */
	void			(*handler)(struct ib_wc *, void *);
	u8			type;
};
/* Only used by RDMA write WRs.
 * All other WRs (CDC/LLC) use smc_wr_tx_send(), which assigns the WR_ID
 * implicitly.
 */
static inline long smc_wr_tx_get_next_wr_id(struct smc_link *link)
{
	return atomic_long_inc_return(&link->wr_tx_id);
}

static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
{
	atomic_long_set(wr_tx_id, val);
}

static inline bool smc_wr_tx_link_hold(struct smc_link *link)
{
	if (!smc_link_sendable(link))
		return false;
	atomic_inc(&link->wr_tx_refcnt);
	return true;
}

static inline void smc_wr_tx_link_put(struct smc_link *link)
{
	if (atomic_dec_and_test(&link->wr_tx_refcnt))
		wake_up_all(&link->wr_tx_wait);
}
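/* A minimal usage sketch (illustrative only, mirroring typical send paths;
 * not a new API): a sender pins the link before posting and drops the
 * reference afterwards, so that link teardown can wait in wr_tx_wait until
 * all in-flight senders are gone:
 *
 *	if (!smc_wr_tx_link_hold(link))
 *		return -ENOLINK;
 *	... get a slot, fill it, post the send ...
 *	smc_wr_tx_link_put(link);
 */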
static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
{
	wake_up_all(&lnk->wr_tx_wait);
}

static inline void smc_wr_wakeup_reg_wait(struct smc_link *lnk)
{
	wake_up(&lnk->wr_reg_wait);
}

/* post a new receive work request to fill a completed old work request entry */
static inline int smc_wr_rx_post(struct smc_link *link)
{
	int rc;
	u64 wr_id, temp_wr_id;
	u32 index;

	wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */
	temp_wr_id = wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt); /* index = wr_id % wr_rx_cnt */
	link->wr_rx_ibs[index].wr_id = wr_id;
	rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], NULL);
	return rc;
}
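/* A minimal sketch of the refill pattern (illustrative; the actual loop
 * lives in the receive CQ handling): every receive completion consumed in
 * tasklet context is followed by one smc_wr_rx_post(), so the receive
 * queue stays fully populated:
 *
 *	for each polled struct ib_wc:
 *		dispatch to the smc_wr_rx_handler registered for the type;
 *		smc_wr_rx_post(link);
 */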

int smc_wr_create_link(struct smc_link *lnk);
int smc_wr_alloc_link_mem(struct smc_link *lnk);
void smc_wr_free_link(struct smc_link *lnk);
void smc_wr_free_link_mem(struct smc_link *lnk);
void smc_wr_remember_qp_attr(struct smc_link *lnk);
void smc_wr_remove_dev(struct smc_ib_device *smcibdev);
void smc_wr_add_dev(struct smc_ib_device *smcibdev);

int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wrs,
			    struct smc_wr_tx_pend_priv **wr_pend_priv);
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv);
int smc_wr_tx_send(struct smc_link *link,
		   struct smc_wr_tx_pend_priv *wr_pend_priv);
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout);
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data);
void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
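/* A minimal sketch of the send-slot lifecycle (illustrative only;
 * my_tx_handler and the message fill step are hypothetical caller-side
 * names, not part of this header):
 *
 *	struct smc_wr_buf *wr_buf;
 *	struct smc_wr_tx_pend_priv *pend;
 *	int rc;
 *
 *	rc = smc_wr_tx_get_free_slot(link, my_tx_handler, &wr_buf, NULL,
 *				     &pend);
 *	if (rc)
 *		return rc;
 *	... fill *wr_buf with up to SMC_WR_TX_SIZE bytes of ctrl data ...
 *	rc = smc_wr_tx_send(link, pend);
 *
 * On a failed post, smc_wr_tx_send() is expected to release the slot again;
 * an unused slot can be returned explicitly via smc_wr_tx_put_slot().
 */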

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
int smc_wr_rx_post_init(struct smc_link *link);
void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr);

#endif /* SMC_WR_H */