// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/mempool.h>

#include "rtrs-srv.h"
#include "rtrs-log.h"
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL");

/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE

/* We guarantee to serve at least 10 paths */
#define CHUNK_POOL_SZ 10

static struct rtrs_rdma_dev_pd dev_pd;
static mempool_t *chunk_pool;
struct class *rtrs_dev_class;
static struct rtrs_srv_ib_ctx ib_ctx;

static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;

static bool always_invalidate = true;
module_param(always_invalidate, bool, 0444);
MODULE_PARM_DESC(always_invalidate,
		 "Invalidate memory registration for contiguous memory regions before accessing.");

module_param_named(max_chunk_size, max_chunk_size, int, 0444);
MODULE_PARM_DESC(max_chunk_size,
		 "Max size for each IO request in bytes (default: "
		 __stringify(DEFAULT_MAX_CHUNK_SIZE) " bytes)");

module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
MODULE_PARM_DESC(sess_queue_depth,
		 "Number of buffers for pending I/O requests to allocate per session. Maximum: "
		 __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
		 __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");

static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };

static struct workqueue_struct *rtrs_wq;

static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
{
	return container_of(c, struct rtrs_srv_con, c);
}

static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
{
	return container_of(s, struct rtrs_srv_sess, s);
}

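/*
 * Server-side session states, as encoded by the transitions below:
 * CONNECTING -> CONNECTED -> CLOSING -> CLOSED, where CLOSING may also be
 * entered directly from CONNECTING. Any other transition is rejected and
 * leaves the state unchanged.
 */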
static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
				    enum rtrs_srv_state new_state)
{
	enum rtrs_srv_state old_state;
	bool changed = false;

	lockdep_assert_held(&sess->state_lock);
	old_state = sess->state;
	switch (new_state) {
	case RTRS_SRV_CONNECTED:
		switch (old_state) {
		case RTRS_SRV_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_SRV_CLOSING:
		switch (old_state) {
		case RTRS_SRV_CONNECTING:
		case RTRS_SRV_CONNECTED:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_SRV_CLOSED:
		switch (old_state) {
		case RTRS_SRV_CLOSING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}
	if (changed)
		sess->state = new_state;

	return changed;
}

static bool rtrs_srv_change_state_get_old(struct rtrs_srv_sess *sess,
					  enum rtrs_srv_state new_state,
					  enum rtrs_srv_state *old_state)
{
	bool changed;

	spin_lock_irq(&sess->state_lock);
	*old_state = sess->state;
	changed = __rtrs_srv_change_state(sess, new_state);
	spin_unlock_irq(&sess->state_lock);

	return changed;
}

static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
				  enum rtrs_srv_state new_state)
{
	enum rtrs_srv_state old_state;

	return rtrs_srv_change_state_get_old(sess, new_state, &old_state);
}

static void free_id(struct rtrs_srv_op *id)
{
	if (!id)
		return;
	kfree(id);
}

static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
{
	struct rtrs_srv *srv = sess->srv;
	int i;

	WARN_ON(atomic_read(&sess->ids_inflight));
	if (sess->ops_ids) {
		for (i = 0; i < srv->queue_depth; i++)
			free_id(sess->ops_ids[i]);
		kfree(sess->ops_ids);
		sess->ops_ids = NULL;
	}
}

static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);

static struct ib_cqe io_comp_cqe = {
	.done = rtrs_srv_rdma_done
};

static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
{
	struct rtrs_srv *srv = sess->srv;
	struct rtrs_srv_op *id;
	int i;

	sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
				GFP_KERNEL);
	if (!sess->ops_ids)
		goto err;

	for (i = 0; i < srv->queue_depth; ++i) {
		id = kzalloc(sizeof(*id), GFP_KERNEL);
		if (!id)
			goto err;

		sess->ops_ids[i] = id;
	}
	init_waitqueue_head(&sess->ids_waitq);
	atomic_set(&sess->ids_inflight, 0);

	return 0;

err:
	rtrs_srv_free_ops_ids(sess);
	return -ENOMEM;
}

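/*
 * ids_inflight is a plain in-flight I/O counter: a reference is taken via
 * rtrs_srv_get_ops_ids() when a request starts being processed and dropped
 * via rtrs_srv_put_ops_ids() once the response is finished, so that session
 * teardown can block in rtrs_srv_wait_ops_ids() until every outstanding
 * operation has completed.
 */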
static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
{
	atomic_inc(&sess->ids_inflight);
}

static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
{
	if (atomic_dec_and_test(&sess->ids_inflight))
		wake_up(&sess->ids_waitq);
}

static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
{
	wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
}

static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_srv_con *con = cq->cq_context;
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_srv_sess *sess = to_srv_sess(s);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		rtrs_err(s, "REG MR failed: %s\n",
			 ib_wc_status_msg(wc->status));
		close_sess(sess);
		return;
	}
}

static struct ib_cqe local_reg_cqe = {
	.done = rtrs_srv_reg_mr_done
};

static int rdma_write_sg(struct rtrs_srv_op *id)
{
	struct rtrs_sess *s = id->con->c.sess;
	struct rtrs_srv_sess *sess = to_srv_sess(s);
	dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
	struct rtrs_srv_mr *srv_mr;
	struct rtrs_srv *srv = sess->srv;
	struct ib_send_wr inv_wr;
	struct ib_rdma_wr imm_wr;
	struct ib_rdma_wr *wr = NULL;
	enum ib_send_flags flags;
	size_t sg_cnt;
	int err, offset;
	bool need_inval;
	u32 rkey = 0;
	struct ib_reg_wr rwr;
	struct ib_sge *plist;
	struct ib_sge list;

	sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
	need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
	if (unlikely(sg_cnt != 1))
		return -EINVAL;

	offset = 0;

	wr = &id->tx_wr;
	plist = &id->tx_sg;
	plist->addr = dma_addr + offset;
	plist->length = le32_to_cpu(id->rd_msg->desc[0].len);

	/* WR will fail with a length error if this is 0 */
	if (unlikely(plist->length == 0)) {
		rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
		return -EINVAL;
	}

	plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
	offset += plist->length;

	wr->wr.sg_list = plist;
	wr->wr.num_sge = 1;
	wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
	wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
	if (rkey == 0)
		rkey = wr->rkey;
	else
		/* Only one key is actually used */
		WARN_ON_ONCE(rkey != wr->rkey);

	wr->wr.opcode = IB_WR_RDMA_WRITE;
	wr->wr.wr_cqe = &io_comp_cqe;
	wr->wr.ex.imm_data = 0;
	wr->wr.send_flags = 0;

	if (need_inval && always_invalidate) {
		wr->wr.next = &rwr.wr;
		rwr.wr.next = &inv_wr;
		inv_wr.next = &imm_wr.wr;
	} else if (always_invalidate) {
		wr->wr.next = &rwr.wr;
		rwr.wr.next = &imm_wr.wr;
	} else if (need_inval) {
		wr->wr.next = &inv_wr;
		inv_wr.next = &imm_wr.wr;
	} else {
		wr->wr.next = &imm_wr.wr;
	}
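	/*
	 * Depending on the two flags above, the chain posted below is one of
	 * (opcodes of inv_wr, rwr and imm_wr are filled in further down):
	 *   RDMA_WRITE -> REG_MR -> SEND_WITH_INV -> SEND_WITH_IMM
	 *   RDMA_WRITE -> REG_MR -> SEND_WITH_IMM
	 *   RDMA_WRITE -> SEND_WITH_INV -> RDMA_WRITE_WITH_IMM
	 *   RDMA_WRITE -> RDMA_WRITE_WITH_IMM
	 */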
	/*
	 * From time to time we have to post signaled sends,
	 * or the send queue will fill up and only a QP reset can help.
	 */
	flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
		0 : IB_SEND_SIGNALED;
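	/*
	 * E.g. with the default sess_queue_depth of 512, one WR out of every
	 * 512 posted on this connection is signaled, which is enough for the
	 * provider to keep retiring completed send WRs.
	 */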

	if (need_inval) {
		inv_wr.sg_list = NULL;
		inv_wr.num_sge = 0;
		inv_wr.opcode = IB_WR_SEND_WITH_INV;
		inv_wr.wr_cqe = &io_comp_cqe;
		inv_wr.send_flags = 0;
		inv_wr.ex.invalidate_rkey = rkey;
	}

	imm_wr.wr.next = NULL;
	if (always_invalidate) {
		struct rtrs_msg_rkey_rsp *msg;

		srv_mr = &sess->mrs[id->msg_id];
		rwr.wr.opcode = IB_WR_REG_MR;
		rwr.wr.wr_cqe = &local_reg_cqe;
		rwr.wr.num_sge = 0;
		rwr.mr = srv_mr->mr;
		rwr.wr.send_flags = 0;
		rwr.key = srv_mr->mr->rkey;
		rwr.access = (IB_ACCESS_LOCAL_WRITE |
			      IB_ACCESS_REMOTE_WRITE);
		msg = srv_mr->iu->buf;
		msg->buf_id = cpu_to_le16(id->msg_id);
		msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
		msg->rkey = cpu_to_le32(srv_mr->mr->rkey);

		list.addr = srv_mr->iu->dma_addr;
		list.length = sizeof(*msg);
		list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
		imm_wr.wr.sg_list = &list;
		imm_wr.wr.num_sge = 1;
		imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
		ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
					      srv_mr->iu->dma_addr,
					      srv_mr->iu->size, DMA_TO_DEVICE);
	} else {
		imm_wr.wr.sg_list = NULL;
		imm_wr.wr.num_sge = 0;
		imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
	}
	imm_wr.wr.send_flags = flags;
	imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
								0, need_inval));

	imm_wr.wr.wr_cqe = &io_comp_cqe;
	ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
				      offset, DMA_BIDIRECTIONAL);

	err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
	if (unlikely(err))
		rtrs_err(s,
			 "Posting RDMA-Write-Request to QP failed, err: %d\n",
			 err);

	return err;
}

/**
 * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
 *                      requests or on successful WRITE request.
 * @con: the connection to send back result
 * @id: the id associated with the IO
 * @errno: the error number of the IO.
 *
 * Return: 0 on success, errno otherwise.
 */
static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
			    int errno)
{
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_srv_sess *sess = to_srv_sess(s);
	struct ib_send_wr inv_wr, *wr = NULL;
	struct ib_rdma_wr imm_wr;
	struct ib_reg_wr rwr;
	struct rtrs_srv *srv = sess->srv;
	struct rtrs_srv_mr *srv_mr;
	bool need_inval = false;
	enum ib_send_flags flags;
	u32 imm;
	int err;

	if (id->dir == READ) {
		struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
		size_t sg_cnt;

		need_inval = le16_to_cpu(rd_msg->flags) &
				RTRS_MSG_NEED_INVAL_F;
		sg_cnt = le16_to_cpu(rd_msg->sg_cnt);

		if (need_inval) {
			if (likely(sg_cnt)) {
				inv_wr.wr_cqe = &io_comp_cqe;
				inv_wr.sg_list = NULL;
				inv_wr.num_sge = 0;
				inv_wr.opcode = IB_WR_SEND_WITH_INV;
				inv_wr.send_flags = 0;
				/* Only one key is actually used */
				inv_wr.ex.invalidate_rkey =
					le32_to_cpu(rd_msg->desc[0].key);
			} else {
				WARN_ON_ONCE(1);
				need_inval = false;
			}
		}
	}

	if (need_inval && always_invalidate) {
		wr = &inv_wr;
		inv_wr.next = &rwr.wr;
		rwr.wr.next = &imm_wr.wr;
	} else if (always_invalidate) {
		wr = &rwr.wr;
		rwr.wr.next = &imm_wr.wr;
	} else if (need_inval) {
		wr = &inv_wr;
		inv_wr.next = &imm_wr.wr;
	} else {
		wr = &imm_wr.wr;
	}
	/*
	 * From time to time we have to post signaled sends,
	 * or the send queue will fill up and only a QP reset can help.
	 */
	flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
		0 : IB_SEND_SIGNALED;
	imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
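	/*
	 * rtrs_to_io_rsp_imm() lives in the shared rtrs code (not part of
	 * this excerpt); it packs the buffer id, the error code and the
	 * need-invalidate flag into the 32-bit immediate so the client can
	 * match this response to its request without any extra payload.
	 */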
	imm_wr.wr.next = NULL;
	if (always_invalidate) {
		struct ib_sge list;
		struct rtrs_msg_rkey_rsp *msg;

		srv_mr = &sess->mrs[id->msg_id];
		rwr.wr.next = &imm_wr.wr;
		rwr.wr.opcode = IB_WR_REG_MR;
		rwr.wr.wr_cqe = &local_reg_cqe;
		rwr.wr.num_sge = 0;
		rwr.wr.send_flags = 0;
		rwr.mr = srv_mr->mr;
		rwr.key = srv_mr->mr->rkey;
		rwr.access = (IB_ACCESS_LOCAL_WRITE |
			      IB_ACCESS_REMOTE_WRITE);
		msg = srv_mr->iu->buf;
		msg->buf_id = cpu_to_le16(id->msg_id);
		msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
		msg->rkey = cpu_to_le32(srv_mr->mr->rkey);

		list.addr = srv_mr->iu->dma_addr;
		list.length = sizeof(*msg);
		list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
		imm_wr.wr.sg_list = &list;
		imm_wr.wr.num_sge = 1;
		imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
		ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
					      srv_mr->iu->dma_addr,
					      srv_mr->iu->size, DMA_TO_DEVICE);
	} else {
		imm_wr.wr.sg_list = NULL;
		imm_wr.wr.num_sge = 0;
		imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
	}
	imm_wr.wr.send_flags = flags;
	imm_wr.wr.wr_cqe = &io_comp_cqe;

	imm_wr.wr.ex.imm_data = cpu_to_be32(imm);

	err = ib_post_send(id->con->c.qp, wr, NULL);
	if (unlikely(err))
		rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
			    err);

	return err;
}

void close_sess(struct rtrs_srv_sess *sess)
{
	enum rtrs_srv_state old_state;

	if (rtrs_srv_change_state_get_old(sess, RTRS_SRV_CLOSING,
					  &old_state))
		queue_work(rtrs_wq, &sess->close_work);
	WARN_ON(sess->state != RTRS_SRV_CLOSING);
}

static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
{
	switch (state) {
	case RTRS_SRV_CONNECTING:
		return "RTRS_SRV_CONNECTING";
	case RTRS_SRV_CONNECTED:
		return "RTRS_SRV_CONNECTED";
	case RTRS_SRV_CLOSING:
		return "RTRS_SRV_CLOSING";
	case RTRS_SRV_CLOSED:
		return "RTRS_SRV_CLOSED";
	default:
		return "UNKNOWN";
	}
}

/**
 * rtrs_srv_resp_rdma() - Finish an RDMA request
 *
 * @id: Internal RTRS operation identifier
 * @status: Response Code sent to the other side for this operation.
 *          0 = success, <0 error
 * Context: any
 *
 * Finish an RDMA operation. A message is sent to the client and the
 * corresponding memory areas will be released.
 */
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
{
	struct rtrs_srv_sess *sess;
	struct rtrs_srv_con *con;
	struct rtrs_sess *s;
	int err;

	if (WARN_ON(!id))
		return true;

	con = id->con;
	s = con->c.sess;
	sess = to_srv_sess(s);

	id->status = status;

	if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
		rtrs_err_rl(s,
			    "Sending I/O response failed, session is disconnected, sess state %s\n",
			    rtrs_srv_state_str(sess->state));
		goto out;
	}
	if (always_invalidate) {
		struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];

		ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
	}
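	/*
	 * The rkey bump above, together with the REG_MR WR chained into the
	 * response, helps ensure that a late client access using the stale
	 * rkey faults instead of silently hitting a reused buffer.
	 */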
	if (unlikely(atomic_sub_return(1, &con->sq_wr_avail) < 0)) {
		pr_err("IB send queue full\n");
		atomic_add(1, &con->sq_wr_avail);
		spin_lock(&con->rsp_wr_wait_lock);
		list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
		spin_unlock(&con->rsp_wr_wait_lock);
		return false;
	}
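	/*
	 * sq_wr_avail acts as a send-queue credit counter: with no credit
	 * left the response is parked on rsp_wr_wait_list and retried by the
	 * send-completion handling code (not shown in this excerpt) once
	 * credits are returned.
	 */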

	if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
		err = send_io_resp_imm(con, id, status);
	else
		err = rdma_write_sg(id);

	if (unlikely(err)) {
		rtrs_err_rl(s, "IO response failed: %d\n", err);
		close_sess(sess);
	}
out:
	rtrs_srv_put_ops_ids(sess);
	return true;
}
EXPORT_SYMBOL(rtrs_srv_resp_rdma);

/**
 * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
 * @srv: Session pointer
 * @priv: The private pointer that is associated with the session.
 */
void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
{
	srv->priv = priv;
}
EXPORT_SYMBOL(rtrs_srv_set_sess_priv);

static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
{
	int i;

	for (i = 0; i < sess->mrs_num; i++) {
		struct rtrs_srv_mr *srv_mr;

		srv_mr = &sess->mrs[i];
		rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
		ib_dereg_mr(srv_mr->mr);
		ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
				srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
		sg_free_table(&srv_mr->sgt);
	}
	kfree(sess->mrs);
}

static int map_cont_bufs(struct rtrs_srv_sess *sess)
{
	struct rtrs_srv *srv = sess->srv;
	struct rtrs_sess *ss = &sess->s;
	int i, mri, err, mrs_num;
	unsigned int chunk_bits;
	int chunks_per_mr = 1;

	/*
	 * Here we map queue_depth chunks to MRs. First we have to figure
	 * out how many chunks we can map per MR.
	 */
	if (always_invalidate) {
		/*
		 * In order to invalidate each chunk of memory separately,
		 * we need more memory regions.
		 */
		mrs_num = srv->queue_depth;
	} else {
		chunks_per_mr =
			sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
		mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
		chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
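		/*
		 * For example, queue_depth = 512 with a device limit of
		 * max_fast_reg_page_list_len = 256 gives mrs_num = 2 and
		 * chunks_per_mr = 256; the second DIV_ROUND_UP re-balances
		 * the chunks evenly across the allocated MRs.
		 */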
	}

	sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
	if (!sess->mrs)
		return -ENOMEM;

	sess->mrs_num = mrs_num;

	for (mri = 0; mri < mrs_num; mri++) {
		struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
		struct sg_table *sgt = &srv_mr->sgt;
		struct scatterlist *s;
		struct ib_mr *mr;
		int nr, chunks;

		chunks = chunks_per_mr * mri;
		if (!always_invalidate)
			chunks_per_mr = min_t(int, chunks_per_mr,
					      srv->queue_depth - chunks);

		err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
		if (err)
			goto err;

		for_each_sg(sgt->sgl, s, chunks_per_mr, i)
			sg_set_page(s, srv->chunks[chunks + i],
				    max_chunk_size, 0);

		nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
				   sgt->nents, DMA_BIDIRECTIONAL);
		if (nr < sgt->nents) {
			err = nr < 0 ? nr : -EINVAL;
			goto free_sg;
		}
		mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
				 sgt->nents);
		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			goto unmap_sg;
		}
		nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
				  NULL, max_chunk_size);
		if (nr < 0 || nr < sgt->nents) {
			err = nr < 0 ? nr : -EINVAL;
			goto dereg_mr;
		}

		if (always_invalidate) {
			srv_mr->iu = rtrs_iu_alloc(1,
					sizeof(struct rtrs_msg_rkey_rsp),
					GFP_KERNEL, sess->s.dev->ib_dev,
					DMA_TO_DEVICE, rtrs_srv_rdma_done);
			if (!srv_mr->iu) {
				err = -ENOMEM;
				rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
				goto dereg_mr;
			}
		}
		/* Eventually dma addr for each chunk can be cached */
		for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
			sess->dma_addr[chunks + i] = sg_dma_address(s);

		ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
		srv_mr->mr = mr;

		continue;
err:
		while (mri--) {
			srv_mr = &sess->mrs[mri];
			sgt = &srv_mr->sgt;
			mr = srv_mr->mr;
			rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
dereg_mr:
			ib_dereg_mr(mr);
unmap_sg:
			ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
					sgt->nents, DMA_BIDIRECTIONAL);
free_sg:
			sg_free_table(sgt);
		}
		kfree(sess->mrs);

		return err;
	}

	chunk_bits = ilog2(srv->queue_depth - 1) + 1;
	sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
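	/*
	 * chunk_bits is the number of bits needed to address queue_depth
	 * chunks (9 for the default depth of 512). In the request paths
	 * (not shown in this excerpt) the top chunk_bits of an immediate
	 * payload select the chunk and the low mem_bits carry the offset
	 * within it.
	 */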

	return 0;
}

static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
	close_sess(to_srv_sess(c->sess));
}

static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
{
	rtrs_init_hb(&sess->s, &io_comp_cqe,
		     RTRS_HB_INTERVAL_MS,
		     RTRS_HB_MISSED_MAX,
		     rtrs_srv_hb_err_handler,
		     rtrs_wq);
}

static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
{
	rtrs_start_hb(&sess->s);
}

static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
{
	rtrs_stop_hb(&sess->s);
}

static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_srv_con *con = cq->cq_context;
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_srv_sess *sess = to_srv_sess(s);
	struct rtrs_iu *iu;

	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
	rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		rtrs_err(s, "Sess info response send failed: %s\n",
			 ib_wc_status_msg(wc->status));
		close_sess(sess);
		return;
	}
	WARN_ON(wc->opcode != IB_WC_SEND);
}

static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
{
	struct rtrs_srv *srv = sess->srv;
	struct rtrs_srv_ctx *ctx = srv->ctx;
	int up;

	mutex_lock(&srv->paths_ev_mutex);
	up = ++srv->paths_up;
	if (up == 1)
		ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
	mutex_unlock(&srv->paths_ev_mutex);

	/* Mark session as established */
	sess->established = true;
}

static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
{
	struct rtrs_srv *srv = sess->srv;
	struct rtrs_srv_ctx *ctx = srv->ctx;

	if (!sess->established)
		return;

	sess->established = false;
	mutex_lock(&srv->paths_ev_mutex);
	WARN_ON(!srv->paths_up);
	if (--srv->paths_up == 0)
		ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
	mutex_unlock(&srv->paths_ev_mutex);
}

static int post_recv_sess(struct rtrs_srv_sess *sess);

static int process_info_req(struct rtrs_srv_con *con,
			    struct rtrs_msg_info_req *msg)
{
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_srv_sess *sess = to_srv_sess(s);
	struct ib_send_wr *reg_wr = NULL;
	struct rtrs_msg_info_rsp *rsp;
	struct rtrs_iu *tx_iu;
	struct ib_reg_wr *rwr;
	int mri, err;
	size_t tx_sz;

	err = post_recv_sess(sess);
	if (unlikely(err)) {
		rtrs_err(s, "post_recv_sess(), err: %d\n", err);
		return err;
	}
	rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
	if (unlikely(!rwr))
		return -ENOMEM;
	strlcpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));

	tx_sz = sizeof(*rsp);
	tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
	tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
			      DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
	if (unlikely(!tx_iu)) {
		err = -ENOMEM;
		goto rwr_free;
	}

	rsp = tx_iu->buf;
	rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
	rsp->sg_cnt = cpu_to_le16(sess->mrs_num);

	for (mri = 0; mri < sess->mrs_num; mri++) {
		struct ib_mr *mr = sess->mrs[mri].mr;

		rsp->desc[mri].addr = cpu_to_le64(mr->iova);
		rsp->desc[mri].key = cpu_to_le32(mr->rkey);
		rsp->desc[mri].len = cpu_to_le32(mr->length);

		/*
		 * Fill in reg MR request and chain them *backwards*
		 */
		rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
		rwr[mri].wr.opcode = IB_WR_REG_MR;
		rwr[mri].wr.wr_cqe = &local_reg_cqe;
		rwr[mri].wr.num_sge = 0;
		rwr[mri].wr.send_flags = 0;
		rwr[mri].mr = mr;
		rwr[mri].key = mr->rkey;
		rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE);
		reg_wr = &rwr[mri].wr;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) err = rtrs_srv_create_sess_files(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto iu_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) kobject_get(&sess->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) get_device(&sess->srv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rtrs_srv_start_hb(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
	/*
	 * We do not account for the number of established connections at the
	 * moment; we rely on the client, which sends the info request only
	 * when all connections have been successfully established.  Thus,
	 * simply notify the listener with the proper event if we are the
	 * first path.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rtrs_srv_sess_up(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tx_iu->size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Send info response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) iu_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) rwr_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) kfree(rwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
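/*
 * Completion of the receive posted for the info request: validate the
 * message and hand it over to process_info_req(), closing the session
 * on any error.
 */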
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct rtrs_srv_con *con = cq->cq_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct rtrs_msg_info_req *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct rtrs_iu *iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) WARN_ON(con->c.cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (unlikely(wc->status != IB_WC_SUCCESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) rtrs_err(s, "Sess info request receive failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ib_wc_status_msg(wc->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) WARN_ON(wc->opcode != IB_WC_RECV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (unlikely(wc->byte_len < sizeof(*msg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) rtrs_err(s, "Sess info request is malformed: size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) wc->byte_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) iu->size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) msg = iu->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rtrs_err(s, "Sess info request is malformed: type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) le16_to_cpu(msg->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) err = process_info_req(con, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
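/* Post a receive buffer for the info request on the service connection. */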
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static int post_recv_info_req(struct rtrs_srv_con *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct rtrs_iu *rx_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) GFP_KERNEL, sess->s.dev->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) DMA_FROM_DEVICE, rtrs_srv_info_req_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (unlikely(!rx_iu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -ENOMEM;
	/* Prepare for getting info request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) err = rtrs_iu_post_recv(&con->c, rx_iu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
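/* Post q_size empty receive WRs on one connection. */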
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) for (i = 0; i < q_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
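/*
 * Post receive buffers on all connections of the path: the service
 * connection (cid == 0) gets SERVICE_CON_QUEUE_DEPTH WRs, each IO
 * connection gets queue_depth WRs.
 */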
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static int post_recv_sess(struct rtrs_srv_sess *sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct rtrs_sess *s = &sess->s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) size_t q_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int err, cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) for (cid = 0; cid < sess->s.con_num; cid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (cid == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) q_size = SERVICE_CON_QUEUE_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) q_size = srv->queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) rtrs_err(s, "post_recv_io(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
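/*
 * Pass a read request up to the user module via the rdma_ev() callback;
 * if the callback fails, answer the client with an error response sent
 * as immediate data.
 */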
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static void process_read(struct rtrs_srv_con *con,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct rtrs_msg_rdma_read *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u32 buf_id, u32 off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct rtrs_srv_ctx *ctx = srv->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct rtrs_srv_op *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) size_t usr_len, data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) "Processing read request failed, session is disconnected, sess state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) rtrs_srv_state_str(sess->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (unlikely(msg->sg_cnt != 1 && msg->sg_cnt != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) "Processing read request failed, invalid message\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) rtrs_srv_get_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) rtrs_srv_update_rdma_stats(sess->stats, off, READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) id = sess->ops_ids[buf_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) id->con = con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) id->dir = READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) id->msg_id = buf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) id->rd_msg = msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) usr_len = le16_to_cpu(msg->usr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) data_len = off - usr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) data = page_address(srv->chunks[buf_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ret = ctx->ops.rdma_ev(srv, srv->priv, id, READ, data, data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) data + data_len, usr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) buf_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) goto send_err_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) send_err_msg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ret = send_io_resp_imm(con, id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) buf_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) rtrs_srv_put_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
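/*
 * Pass a write request up to the user module via the rdma_ev() callback;
 * if the callback fails, answer the client with an error response sent
 * as immediate data.
 */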
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void process_write(struct rtrs_srv_con *con,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct rtrs_msg_rdma_write *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) u32 buf_id, u32 off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct rtrs_srv_ctx *ctx = srv->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct rtrs_srv_op *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) size_t data_len, usr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) "Processing write request failed, session is disconnected, sess state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) rtrs_srv_state_str(sess->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) rtrs_srv_get_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) id = sess->ops_ids[buf_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) id->con = con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) id->dir = WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) id->msg_id = buf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) usr_len = le16_to_cpu(req->usr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) data_len = off - usr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) data = page_address(srv->chunks[buf_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = ctx->ops.rdma_ev(srv, srv->priv, id, WRITE, data, data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) data + data_len, usr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) "Processing write request failed, user module callback reports err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) goto send_err_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) send_err_msg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ret = send_io_resp_imm(con, id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) rtrs_err_rl(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) buf_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rtrs_srv_put_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
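/*
 * Dispatch a message received in a chunk to process_read() or
 * process_write() according to the header type; unknown types close
 * the session.
 */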
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static void process_io_req(struct rtrs_srv_con *con, void *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) u32 id, u32 off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct rtrs_msg_rdma_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) max_chunk_size, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) hdr = msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) type = le16_to_cpu(hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) case RTRS_MSG_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) process_write(con, msg, id, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) case RTRS_MSG_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) process_read(con, msg, id, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) rtrs_err(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) "Processing I/O request failed, unknown message type received: 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
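/*
 * Completion of the local invalidate posted by rtrs_srv_inv_rkey():
 * the rkey is now invalid, so the deferred IO request can be processed.
 */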
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct rtrs_srv_mr *mr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct rtrs_srv_con *con = cq->cq_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) u32 msg_id, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (unlikely(wc->status != IB_WC_SUCCESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) ib_wc_status_msg(wc->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) msg_id = mr->msg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) off = mr->msg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) data = page_address(srv->chunks[msg_id]) + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) process_io_req(con, data, msg_id, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
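/* Post a signalled IB_WR_LOCAL_INV to invalidate the rkey of a chunk MR. */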
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct rtrs_srv_mr *mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct ib_send_wr wr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) .opcode = IB_WR_LOCAL_INV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) .wr_cqe = &mr->inv_cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .send_flags = IB_SEND_SIGNALED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .ex.invalidate_rkey = mr->mr->rkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return ib_post_send(con->c.qp, &wr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
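/*
 * Retry the responses that were queued for lack of send WRs; stop and
 * re-queue at the first response that still cannot be posted.
 */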
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) spin_lock(&con->rsp_wr_wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) while (!list_empty(&con->rsp_wr_wait_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct rtrs_srv_op *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) id = list_entry(con->rsp_wr_wait_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct rtrs_srv_op, wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) list_del(&id->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) spin_unlock(&con->rsp_wr_wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ret = rtrs_srv_resp_rdma(id, id->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) spin_lock(&con->rsp_wr_wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) list_add(&id->wait_list, &con->rsp_wr_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) spin_unlock(&con->rsp_wr_wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
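/*
 * Common CQE handler for a connection: receive completions carry IO
 * requests or heartbeats as immediate data, send completions replenish
 * sq_wr_avail and kick the response wait list.
 */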
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct rtrs_srv_con *con = cq->cq_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct rtrs_sess *s = con->c.sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct rtrs_srv_sess *sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) u32 imm_type, imm_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (unlikely(wc->status != IB_WC_SUCCESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (wc->status != IB_WC_WR_FLUSH_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) rtrs_err(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ib_wc_status_msg(wc->status), wc->wr_cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) wc->opcode, wc->vendor_err, wc->byte_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) switch (wc->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) case IB_WC_RECV_RDMA_WITH_IMM:
		/*
		 * Completions of posted receives, consumed by the client's
		 * RDMA writes with immediate: IO reqs (read/write) and hb
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (unlikely(err)) {
			rtrs_err(s, "rtrs_post_recv_empty(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) &imm_type, &imm_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (likely(imm_type == RTRS_IO_REQ_IMM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) u32 msg_id, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) msg_id = imm_payload >> sess->mem_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) off = imm_payload & ((1 << sess->mem_bits) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (unlikely(msg_id >= srv->queue_depth ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) off >= max_chunk_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) rtrs_err(s, "Wrong msg_id %u, off %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) msg_id, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (always_invalidate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) mr->msg_off = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) mr->msg_id = msg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) err = rtrs_srv_inv_rkey(con, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (unlikely(err)) {
				rtrs_err(s, "rtrs_srv_inv_rkey(), err: %d\n",
					 err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) data = page_address(srv->chunks[msg_id]) + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) process_io_req(con, data, msg_id, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) } else if (imm_type == RTRS_HB_MSG_IMM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) WARN_ON(con->c.cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) rtrs_send_hb_ack(&sess->s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) } else if (imm_type == RTRS_HB_ACK_IMM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) WARN_ON(con->c.cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) sess->s.hb_missed_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) case IB_WC_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) case IB_WC_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * post_send() RDMA write completions of IO reqs (read/write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) atomic_add(srv->queue_depth, &con->sq_wr_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) rtrs_rdma_process_wr_wait_list(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * @srv: Session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * @sessname: Sessname buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * @len: Length of sessname buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) mutex_lock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) list_for_each_entry(sess, &srv->paths_list, s.entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (sess->state != RTRS_SRV_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) strlcpy(sessname, sess->s.sessname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) min_t(size_t, sizeof(sess->s.sessname), len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) EXPORT_SYMBOL(rtrs_srv_get_sess_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
/**
 * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
 * @srv:	Session
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return srv->queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
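/* Pick the next CQ vector from cq_affinity_mask, wrapping around. */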
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static int find_next_bit_ring(struct rtrs_srv_sess *sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct ib_device *ib_dev = sess->s.dev->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) int v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) v = cpumask_first(&cq_affinity_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) sess->cur_cq_vector = find_next_bit_ring(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return sess->cur_cq_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static void rtrs_srv_dev_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) kfree(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static void free_srv(struct rtrs_srv *srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) WARN_ON(refcount_read(&srv->refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) for (i = 0; i < srv->queue_depth; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) mempool_free(srv->chunks[i], chunk_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) kfree(srv->chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) mutex_destroy(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) mutex_destroy(&srv->paths_ev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* last put to release the srv structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) put_device(&srv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
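/*
 * Find the srv context by paths_uuid and take a reference, or - only
 * for the first connection request of a session - allocate a new srv
 * together with its pool of RDMA buffer chunks.
 */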
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) const uuid_t *paths_uuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) bool first_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct rtrs_srv *srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) mutex_lock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) refcount_inc_not_zero(&srv->refcount)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) mutex_unlock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) mutex_unlock(&ctx->srv_mutex);
	/*
	 * If this request is not the first connection request from the
	 * client for this session, fail and return an error.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (!first_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return ERR_PTR(-ENXIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* need to allocate a new srv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) srv = kzalloc(sizeof(*srv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (!srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) INIT_LIST_HEAD(&srv->paths_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) mutex_init(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) mutex_init(&srv->paths_ev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) uuid_copy(&srv->paths_uuid, paths_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) srv->queue_depth = sess_queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) srv->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) device_initialize(&srv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) srv->dev.release = rtrs_srv_dev_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!srv->chunks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) goto err_free_srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) for (i = 0; i < srv->queue_depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (!srv->chunks[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) goto err_free_chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) refcount_set(&srv->refcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) mutex_lock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) list_add(&srv->ctx_list, &ctx->srv_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) mutex_unlock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) err_free_chunks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) mempool_free(srv->chunks[i], chunk_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) kfree(srv->chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) err_free_srv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) kfree(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) static void put_srv(struct rtrs_srv *srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (refcount_dec_and_test(&srv->refcount)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct rtrs_srv_ctx *ctx = srv->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) WARN_ON(srv->dev.kobj.state_in_sysfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) mutex_lock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) list_del(&srv->ctx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) mutex_unlock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) free_srv(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void __add_path_to_srv(struct rtrs_srv *srv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct rtrs_srv_sess *sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) list_add_tail(&sess->s.entry, &srv->paths_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) srv->paths_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static void del_path_from_srv(struct rtrs_srv_sess *sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (WARN_ON(!srv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) mutex_lock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) list_del(&sess->s.entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) WARN_ON(!srv->paths_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) srv->paths_num--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
/*
 * Return 0 if the addresses match, non-zero if they differ,
 * -ENOENT for an unknown address family.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) switch (a->sa_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) case AF_IB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) &((struct sockaddr_ib *)b)->sib_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) sizeof(struct ib_addr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) (b->sa_family == AF_IB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return memcmp(&((struct sockaddr_in *)a)->sin_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) &((struct sockaddr_in *)b)->sin_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) sizeof(struct in_addr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) (b->sa_family == AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) &((struct sockaddr_in6 *)b)->sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) sizeof(struct in6_addr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) (b->sa_family == AF_INET6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) struct rdma_addr *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) list_for_each_entry(sess, &srv->paths_list, s.entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) (struct sockaddr *)&addr->dst_addr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) (struct sockaddr *)&addr->src_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static void free_sess(struct rtrs_srv_sess *sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (sess->kobj.state_in_sysfs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) kobject_del(&sess->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) kobject_put(&sess->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) kfree(sess->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) kfree(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
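/*
 * Tear down a path: disconnect and drain all connections, wait for the
 * inflight operations, notify the upper layer if this was the last
 * path, then free all per-path resources and drop the srv reference.
 */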
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static void rtrs_srv_close_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct rtrs_srv_con *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) sess = container_of(work, typeof(*sess), close_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) rtrs_srv_destroy_sess_files(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) rtrs_srv_stop_hb(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) for (i = 0; i < sess->s.con_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!sess->s.con[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) con = to_srv_con(sess->s.con[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) rdma_disconnect(con->c.cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) ib_drain_qp(con->c.qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Wait for all inflights */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) rtrs_srv_wait_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /* Notify upper layer if we are the last path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) rtrs_srv_sess_down(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) unmap_cont_bufs(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) rtrs_srv_free_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) for (i = 0; i < sess->s.con_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (!sess->s.con[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) con = to_srv_con(sess->s.con[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) rtrs_cq_qp_destroy(&con->c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) rdma_destroy_id(con->c.cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) kfree(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) rtrs_ib_dev_put(sess->s.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) del_path_from_srv(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) put_srv(sess->srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) sess->srv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) kfree(sess->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) kfree(sess->s.con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) free_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
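/* Accept the connection request, advertising our protocol limits in the reply. */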
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct rdma_cm_id *cm_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct rtrs_msg_conn_rsp msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct rdma_conn_param param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) param = (struct rdma_conn_param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) .rnr_retry_count = 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .private_data = &msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) .private_data_len = sizeof(msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) msg = (struct rtrs_msg_conn_rsp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) .magic = cpu_to_le16(RTRS_MAGIC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) .version = cpu_to_le16(RTRS_PROTO_VER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) .queue_depth = cpu_to_le16(srv->queue_depth),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (always_invalidate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
	err = rdma_accept(cm_id, &param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) pr_err("rdma_accept(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct rtrs_msg_conn_rsp msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) msg = (struct rtrs_msg_conn_rsp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) .magic = cpu_to_le16(RTRS_MAGIC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) .version = cpu_to_le16(RTRS_PROTO_VER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) .errno = cpu_to_le16(errno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) pr_err("rdma_reject(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /* Bounce errno back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static struct rtrs_srv_sess *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) __find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) list_for_each_entry(sess, &srv->paths_list, s.entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (uuid_equal(&sess->s.uuid, sess_uuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
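/*
 * Create one connection of the path: size the QP and CQ according to
 * the connection role (service vs. IO) and, for the service connection,
 * post the receive buffer for the info request.
 */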
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static int create_con(struct rtrs_srv_sess *sess,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct rdma_cm_id *cm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) unsigned int cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct rtrs_srv *srv = sess->srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct rtrs_sess *s = &sess->s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) struct rtrs_srv_con *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) u32 cq_size, max_send_wr, max_recv_wr, wr_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int err, cq_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) con = kzalloc(sizeof(*con), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (!con) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) spin_lock_init(&con->rsp_wr_wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) INIT_LIST_HEAD(&con->rsp_wr_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) con->c.cm_id = cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) con->c.sess = &sess->s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) con->c.cid = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) atomic_set(&con->wr_cnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (con->c.cid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * All receive and all send (each requiring invalidate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * + 2 for drain and heartbeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) max_recv_wr = SERVICE_CON_QUEUE_DEPTH + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) cq_size = max_send_wr + max_recv_wr;
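		/*
		 * Sketch of the arithmetic (descriptive note): with
		 * SERVICE_CON_QUEUE_DEPTH = N this gives max_send_wr =
		 * 2 * N + 2, max_recv_wr = N + 2, i.e. a CQ of
		 * 3 * N + 4 entries in the worst case.
		 */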
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) } else {
		/*
		 * In theory we might have queue_depth * 32
		 * outstanding requests if an unsafe global key is used
		 * and we have queue_depth read requests, each consisting
		 * of 32 different addresses. Divide the device limit
		 * by 3 for mlx5.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (always_invalidate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) max_send_wr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) min_t(int, wr_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) srv->queue_depth * (1 + 4) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) max_send_wr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) min_t(int, wr_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) srv->queue_depth * (1 + 2) + 1);
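		/*
		 * Worked example (sketch, using the module defaults at
		 * the top of this file): with a negotiated queue_depth
		 * of 512, always_invalidate needs
		 * 512 * (1 + 4) + 1 = 2561 send WRs, otherwise
		 * 512 * (1 + 2) + 1 = 1537; either value is capped by
		 * wr_limit = max_qp_wr / 3.
		 */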
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) max_recv_wr = srv->queue_depth + 1;
		/*
		 * Size the CQ for the worst case: all receive requests
		 * posted, all write requests posted, each read request
		 * needing an extra invalidate request, plus the drain
		 * WR issued when the QP goes into the error state.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) cq_size = max_send_wr + max_recv_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
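	/*
	 * sq_wr_avail tracks how many send WRs may still be posted on
	 * this QP; it starts at the full max_send_wr budget computed
	 * above (descriptive note, based on how rtrs accounts send WRs
	 * elsewhere).
	 */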
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) atomic_set(&con->sq_wr_avail, max_send_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) cq_vector = rtrs_srv_get_next_cq_vector(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) max_send_wr, max_recv_wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) IB_POLL_WORKQUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) goto free_con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (con->c.cid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) err = post_recv_info_req(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) goto free_cqqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) WARN_ON(sess->s.con[cid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) sess->s.con[cid] = &con->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * Change context from server to current connection. The other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * way is to use cm_id->qp->qp_context, which does not work on OFED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) cm_id->context = &con->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) free_cqqp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) rtrs_cq_qp_destroy(&con->c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) free_con:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) kfree(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) struct rdma_cm_id *cm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) unsigned int con_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) unsigned int recon_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) const uuid_t *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (srv->paths_num >= MAX_PATHS_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) err = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) pr_err("Path with same addr exists\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) sess = kzalloc(sizeof(*sess), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (!sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (!sess->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) goto err_free_sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) sess->stats->sess = sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (!sess->dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) goto err_free_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (!sess->s.con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) goto err_free_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) sess->state = RTRS_SRV_CONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) sess->srv = srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) sess->cur_cq_vector = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) sess->s.dst_addr = cm_id->route.addr.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) sess->s.src_addr = cm_id->route.addr.src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) sess->s.con_num = con_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) sess->s.recon_cnt = recon_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) uuid_copy(&sess->s.uuid, uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) spin_lock_init(&sess->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) INIT_WORK(&sess->close_work, rtrs_srv_close_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) rtrs_srv_init_hb(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (!sess->s.dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) goto err_free_con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) err = map_cont_bufs(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) goto err_put_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) err = rtrs_srv_alloc_ops_ids(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) goto err_unmap_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) __add_path_to_srv(srv, sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) err_unmap_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) unmap_cont_bufs(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) err_put_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) rtrs_ib_dev_put(sess->s.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) err_free_con:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) kfree(sess->s.con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) err_free_dma_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) kfree(sess->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) err_free_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) kfree(sess->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) err_free_sess:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) kfree(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) const struct rtrs_msg_conn_req *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) struct rtrs_srv_ctx *ctx = cm_id->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) struct rtrs_srv *srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) u16 version, con_num, cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) u16 recon_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (len < sizeof(*msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) pr_err("Invalid RTRS connection request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) pr_err("Invalid RTRS magic\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) version = le16_to_cpu(msg->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) pr_err("Unsupported major RTRS version: %d, expected %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) version >> 8, RTRS_PROTO_VER_MAJOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
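	/*
	 * Note: the wire version is encoded as (major << 8) | minor,
	 * e.g. 0x0100 is protocol 1.0; only the major part has to
	 * match.
	 */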
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) con_num = le16_to_cpu(msg->cid_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (con_num > 4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) pr_err("Too many connections requested: %d\n", con_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) cid = le16_to_cpu(msg->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (cid >= con_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) recon_cnt = le16_to_cpu(msg->recon_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (IS_ERR(srv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) err = PTR_ERR(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) goto reject_w_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) mutex_lock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) sess = __find_sess(srv, &msg->sess_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (sess) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct rtrs_sess *s = &sess->s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /* Session already holds a reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) put_srv(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (sess->state != RTRS_SRV_CONNECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) rtrs_err(s, "Session in wrong state: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) rtrs_srv_state_str(sess->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * Sanity checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (con_num != s->con_num || cid >= s->con_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) rtrs_err(s, "Incorrect request: %d, %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) cid, con_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (s->con[cid]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) rtrs_err(s, "Connection already exists: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) goto reject_w_econnreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) &msg->sess_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (IS_ERR(sess)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) put_srv(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) err = PTR_ERR(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) goto reject_w_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) err = create_con(sess, cm_id, cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) (void)rtrs_rdma_do_reject(cm_id, err);
		/*
		 * Since the session has other connections we follow
		 * the normal way through the workqueue, but still
		 * return an error to tell cma.c to call
		 * rdma_destroy_id() for the current connection.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) goto close_and_return_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) err = rtrs_rdma_do_accept(sess, cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) (void)rtrs_rdma_do_reject(cm_id, err);
		/*
		 * Since the current connection was successfully added
		 * to the session we follow the normal way through the
		 * workqueue to close the session, thus return 0 to
		 * tell cma.c that we call rdma_destroy_id() ourselves.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) goto close_and_return_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) reject_w_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return rtrs_rdma_do_reject(cm_id, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) reject_w_econnreset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) close_and_return_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct rdma_cm_event *ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct rtrs_srv_sess *sess = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) struct rtrs_sess *s = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct rtrs_con *c = cm_id->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) s = c->sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) sess = to_srv_sess(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) switch (ev->event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) case RDMA_CM_EVENT_CONNECT_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * In case of error cma.c will destroy cm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * see cma_process_remove()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) ev->param.conn.private_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) case RDMA_CM_EVENT_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) /* Nothing here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) case RDMA_CM_EVENT_REJECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) case RDMA_CM_EVENT_CONNECT_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) case RDMA_CM_EVENT_UNREACHABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) rdma_event_msg(ev->event), ev->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) case RDMA_CM_EVENT_DISCONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) case RDMA_CM_EVENT_ADDR_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) case RDMA_CM_EVENT_TIMEWAIT_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) case RDMA_CM_EVENT_DEVICE_REMOVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) pr_err("Ignoring unexpected CM event %s, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) rdma_event_msg(ev->event), ev->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct sockaddr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) enum rdma_ucm_port_space ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct rdma_cm_id *cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) ctx, ps, IB_QPT_RC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) if (IS_ERR(cm_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) ret = PTR_ERR(cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) pr_err("Creating id for RDMA connection failed, err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) ret = rdma_bind_addr(cm_id, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) pr_err("Binding RDMA address failed, err: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) goto err_cm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ret = rdma_listen(cm_id, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) pr_err("Listening on RDMA connection failed, err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) goto err_cm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) err_cm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) rdma_destroy_id(cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct sockaddr_in6 sin = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) .sin6_family = AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) .sin6_addr = IN6ADDR_ANY_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) .sin6_port = htons(port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct sockaddr_ib sib = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) .sib_family = AF_IB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) .sib_pkey = cpu_to_be16(0xffff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) };
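	/*
	 * For AF_IB the 64-bit service ID embeds the port space and
	 * the port number (RDMA_IB_IP_PS_IB | port); the all-ones
	 * sib_sid_mask requests an exact service ID match and the
	 * all-ones pkey is the default full-membership partition key.
	 */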
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct rdma_cm_id *cm_ip, *cm_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * We accept both IPoIB and IB connections, so we need to keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * two cm id's, one for each socket type and port space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * If the cm initialization of one of the id's fails, we abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (IS_ERR(cm_ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return PTR_ERR(cm_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (IS_ERR(cm_ib)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ret = PTR_ERR(cm_ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) goto free_cm_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) ctx->cm_id_ip = cm_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) ctx->cm_id_ib = cm_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) free_cm_ip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) rdma_destroy_id(cm_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct rtrs_srv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) ctx->ops = *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) mutex_init(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) INIT_LIST_HEAD(&ctx->srv_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) WARN_ON(!list_empty(&ctx->srv_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) mutex_destroy(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) static int rtrs_srv_add_one(struct ib_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) struct rtrs_srv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) mutex_lock(&ib_ctx.ib_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (ib_ctx.ib_dev_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
	/*
	 * Since our CM IDs are NOT bound to any ib device we create
	 * them only once, when the first device is added.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) ctx = ib_ctx.srv_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (ret) {
		/*
		 * According to the ib code, if we encounter an error
		 * here then the error code is ignored and no more
		 * calls to our ops are made.
		 */
		pr_err("Failed to initialize RDMA connection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) out:
	/* Keep track of the number of ib devices added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) ib_ctx.ib_dev_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) mutex_unlock(&ib_ctx.ib_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct rtrs_srv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) mutex_lock(&ib_ctx.ib_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) ib_ctx.ib_dev_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (ib_ctx.ib_dev_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * Since our CM IDs are NOT bound to any ib device we will remove them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * only once, when the last device is removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) ctx = ib_ctx.srv_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) rdma_destroy_id(ctx->cm_id_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) rdma_destroy_id(ctx->cm_id_ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) mutex_unlock(&ib_ctx.ib_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static struct ib_client rtrs_srv_client = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) .name = "rtrs_server",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) .add = rtrs_srv_add_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) .remove = rtrs_srv_remove_one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * rtrs_srv_open() - open RTRS server context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * @ops: callback functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * @port: port to listen on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * Creates server context with specified callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) *
 * Return: a valid pointer on success, otherwise an ERR_PTR().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) struct rtrs_srv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) ctx = alloc_srv_ctx(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) mutex_init(&ib_ctx.ib_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) ib_ctx.srv_ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) ib_ctx.port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) err = ib_register_client(&rtrs_srv_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) free_srv_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) EXPORT_SYMBOL(rtrs_srv_open);
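
/*
 * Usage sketch (hypothetical caller; my_rdma_ev, my_link_ev and MY_PORT
 * are placeholders, only the rtrs_srv_ops callbacks from rtrs.h are
 * assumed):
 *
 *	static struct rtrs_srv_ops my_ops = {
 *		.rdma_ev = my_rdma_ev,
 *		.link_ev = my_link_ev,
 *	};
 *
 *	ctx = rtrs_srv_open(&my_ops, MY_PORT);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 */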
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) static void close_sessions(struct rtrs_srv *srv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct rtrs_srv_sess *sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) mutex_lock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) list_for_each_entry(sess, &srv->paths_list, s.entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) close_sess(sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) mutex_unlock(&srv->paths_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) static void close_ctx(struct rtrs_srv_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) struct rtrs_srv *srv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) mutex_lock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) list_for_each_entry(srv, &ctx->srv_list, ctx_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) close_sessions(srv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) mutex_unlock(&ctx->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) flush_workqueue(rtrs_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * rtrs_srv_close() - close RTRS server context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * @ctx: pointer to server context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) * Closes RTRS server context with all client sessions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ib_unregister_client(&rtrs_srv_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) mutex_destroy(&ib_ctx.ib_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) close_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) free_srv_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) EXPORT_SYMBOL(rtrs_srv_close);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static int check_module_params(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) max_chunk_size, 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * Check if IB immediate data size is enough to hold the mem_id and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * offset inside the memory chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if ((ilog2(sess_queue_depth - 1) + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
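
	/*
	 * Worked example with the defaults above: sess_queue_depth = 512
	 * needs ilog2(511) + 1 = 9 bits for the mem_id and
	 * max_chunk_size = 128 KiB needs ilog2(131071) + 1 = 17 bits for
	 * the offset, 26 bits in total, which has to fit into
	 * MAX_IMM_PAYL_BITS for the check above to pass.
	 */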
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static int __init rtrs_server_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) sess_queue_depth, always_invalidate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) rtrs_rdma_dev_pd_init(0, &dev_pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) err = check_module_params();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) pr_err("Failed to load module, invalid module parameters, err: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
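	/*
	 * Sizing note (sketch): with the defaults above this reserves
	 * sess_queue_depth * CHUNK_POOL_SZ = 512 * 10 = 5120 chunks of
	 * up to max_chunk_size (128 KiB) each, enough preallocated
	 * chunks to serve at least 10 paths.
	 */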
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) get_order(max_chunk_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (!chunk_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (IS_ERR(rtrs_dev_class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) err = PTR_ERR(rtrs_dev_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) goto out_chunk_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (!rtrs_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) goto out_dev_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) out_dev_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) class_destroy(rtrs_dev_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) out_chunk_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) mempool_destroy(chunk_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static void __exit rtrs_server_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) destroy_workqueue(rtrs_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) class_destroy(rtrs_dev_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) mempool_destroy(chunk_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) rtrs_rdma_dev_pd_deinit(&dev_pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) module_init(rtrs_server_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) module_exit(rtrs_server_exit);