/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include <net/addrconf.h>
#include <rdma/ib_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	const union rds_ib_conn_priv *dp = NULL;
	struct ib_qp_attr qp_attr;
	__be64 ack_seq = 0;
	__be32 credit = 0;
	u8 major = 0;
	u8 minor = 0;
	int err;

	dp = event->param.conn.private_data;
	if (conn->c_isv6) {
		if (event->param.conn.private_data_len >=
		    sizeof(struct rds6_ib_connect_private)) {
			major = dp->ricp_v6.dp_protocol_major;
			minor = dp->ricp_v6.dp_protocol_minor;
			credit = dp->ricp_v6.dp_credit;
			/* The start of the dp structure is not guaranteed to
			 * be 8-byte aligned.  Since dp_ack_seq is 64 bits
			 * wide, extended load operations could be used, so go
			 * through get_unaligned() to avoid unaligned access
			 * errors.
			 */
			ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq);
		}
	} else if (event->param.conn.private_data_len >=
		   sizeof(struct rds_ib_connect_private)) {
		major = dp->ricp_v4.dp_protocol_major;
		minor = dp->ricp_v4.dp_protocol_minor;
		credit = dp->ricp_v4.dp_credit;
		ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq);
	}

	/* make sure it isn't empty data */
	if (major) {
		rds_ib_set_protocol(conn, RDS_PROTOCOL(major, minor));
		rds_ib_set_flow_control(conn, be32_to_cpu(credit));
	}

	if (conn->c_version < RDS_PROTOCOL_VERSION) {
		if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) {
			pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
				  &conn->c_laddr, &conn->c_faddr,
				  RDS_PROTOCOL_MAJOR(conn->c_version),
				  RDS_PROTOCOL_MINOR(conn->c_version));
			rds_conn_destroy(conn);
			return;
		}
	}

	pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
		  ic->i_active_side ? "Active" : "Passive",
		  &conn->c_laddr, &conn->c_faddr, conn->c_tos,
		  RDS_PROTOCOL_MAJOR(conn->c_version),
		  RDS_PROTOCOL_MINOR(conn->c_version),
		  ic->i_flowctl ? ", flow control" : "");

	/* receive sl from the peer */
	ic->i_sl = ic->i_cm_id->route.path_rec->sl;

	atomic_set(&ic->i_cq_quiesce, 0);

	/* Init rings and fill recv. this needs to wait until protocol
	 * negotiation is complete, since ring layout is different
	 * from 3.1 to 4.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		if (ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(ack_seq),
					    NULL);
	}

	conn->c_proposed_version = conn->c_version;
	rds_connect_complete(conn);
}

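/* Fill in the rdma_conn_param and, when a private-data buffer is supplied,
 * the RDS connect private data carried with the CM request/reply: the
 * addresses, the proposed protocol version, the supported minor-version
 * mask, a piggybacked ACK sequence and, with flow control enabled, the
 * currently posted receive credits, which are advertised to the peer and
 * subtracted from our local credit count.
 */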
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      union rds_ib_conn_priv *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth,
				      bool isv6)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		if (isv6) {
			dp->ricp_v6.dp_saddr = conn->c_laddr;
			dp->ricp_v6.dp_daddr = conn->c_faddr;
			dp->ricp_v6.dp_protocol_major =
				RDS_PROTOCOL_MAJOR(protocol_version);
			dp->ricp_v6.dp_protocol_minor =
				RDS_PROTOCOL_MINOR(protocol_version);
			dp->ricp_v6.dp_protocol_minor_mask =
				cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
			dp->ricp_v6.dp_ack_seq =
				cpu_to_be64(rds_ib_piggyb_ack(ic));
			dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;

			conn_param->private_data = &dp->ricp_v6;
			conn_param->private_data_len = sizeof(dp->ricp_v6);
		} else {
			dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
			dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3];
			dp->ricp_v4.dp_protocol_major =
				RDS_PROTOCOL_MAJOR(protocol_version);
			dp->ricp_v4.dp_protocol_minor =
				RDS_PROTOCOL_MINOR(protocol_version);
			dp->ricp_v4.dp_protocol_minor_mask =
				cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
			dp->ricp_v4.dp_ack_seq =
				cpu_to_be64(rds_ib_piggyb_ack(ic));
			dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;

			conn_param->private_data = &dp->ricp_v4;
			conn_param->private_data_len = sizeof(dp->ricp_v4);
		}

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			if (isv6)
				dp->ricp_v6.dp_credit = cpu_to_be32(credits);
			else
				dp->ricp_v4.dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits),
				   &ic->i_credits);
		}
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

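/* Drain the send CQ.  Completions are dispatched on wr_id: ids within the
 * send ring (and the dedicated RDS_IB_ACK_WR_ID) belong to data/ACK sends,
 * anything else is an FRWR registration/invalidation completion handled by
 * the MR code.
 */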
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}

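/* Bottom half for send completions.  The CQ is polled, re-armed with
 * ib_req_notify_cq(), then polled once more to close the window where a
 * completion arrives between the last poll and re-arming; without the
 * second poll such a completion would raise no event until the next one.
 */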
static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if the cq has already been reaped, ignore incoming cq events */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	     test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}

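/* Drain the receive CQ.  Unlike poll_scq(), every completion goes to the
 * receive handler, which accumulates ACK state in *ack_state rather than
 * acting on each completion individually.
 */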
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}

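/* Bottom half for receive completions.  Uses the same poll/re-arm/poll
 * pattern as the send tasklet, then applies the aggregated ACK state:
 * updating the ACK we owe the peer and dropping sends the peer has
 * already acknowledged.
 */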
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if the cq has already been reaped, ignore incoming cq events */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}

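/* Async QP event handler.  IB_EVENT_COMM_EST is forwarded to the CM via
 * rdma_notify(), since it indicates data arrived before the CM's own
 * ESTABLISHED event; any other event is treated as fatal and the
 * connection is dropped so that it can reconnect.
 */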
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}

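/* Pick the least-loaded completion vector so that CQ work is spread
 * across the device's vectors (and hence CPUs).  vector_load[] is a
 * simple per-vector usage counter; the scan returns the index with the
 * smallest count and bumps it.
 */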
static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
	int index = rds_ibdev->dev->num_comp_vectors - 1;
	int i;

	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
		if (rds_ibdev->vector_load[i] < min) {
			index = i;
			min = rds_ibdev->vector_load[i];
		}
	}

	rds_ibdev->vector_load[index]++;
	return index;
}

static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
	rds_ibdev->vector_load[index]--;
}

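/* Unmap and free a single DMA-mapped rds_header allocated by
 * rds_dma_hdr_alloc().
 */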
static void rds_dma_hdr_free(struct ib_device *dev, struct rds_header *hdr,
			     dma_addr_t dma_addr, enum dma_data_direction dir)
{
	ib_dma_unmap_single(dev, dma_addr, sizeof(*hdr), dir);
	kfree(hdr);
}

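/* Allocate one rds_header on the device's NUMA node and DMA-map it.
 * Returns the header pointer with *dma_addr set, or NULL if either the
 * allocation or the mapping fails; the mapping is validated with
 * ib_dma_mapping_error() before the address is ever used.
 */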
static struct rds_header *rds_dma_hdr_alloc(struct ib_device *dev,
					    dma_addr_t *dma_addr, enum dma_data_direction dir)
{
	struct rds_header *hdr;

	hdr = kzalloc_node(sizeof(*hdr), GFP_KERNEL, ibdev_to_node(dev));
	if (!hdr)
		return NULL;

	*dma_addr = ib_dma_map_single(dev, hdr, sizeof(*hdr),
				      DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(dev, *dma_addr)) {
		kfree(hdr);
		return NULL;
	}

	return hdr;
}

/* Free the DMA memory used to store struct rds_header.
 *
 * @dev: the RDS IB device
 * @hdrs: pointer to the array storing DMA memory pointers
 * @dma_addrs: pointer to the array storing DMA addresses
 * @num_hdrs: number of headers to free
 * @dir: DMA direction the headers were mapped with
 */
static void rds_dma_hdrs_free(struct rds_ib_device *dev,
			      struct rds_header **hdrs, dma_addr_t *dma_addrs, u32 num_hdrs,
			      enum dma_data_direction dir)
{
	u32 i;

	for (i = 0; i < num_hdrs; i++)
		rds_dma_hdr_free(dev->dev, hdrs[i], dma_addrs[i], dir);
	kvfree(hdrs);
	kvfree(dma_addrs);
}


/* Allocate DMA-mapped memory to be used to store struct rds_header for
 * sending/receiving packets.  The pointers to the DMA memory and the
 * associated DMA addresses are stored in two arrays.
 *
 * @dev: the RDS IB device
 * @dma_addrs: pointer to the array for storing DMA addresses
 * @num_hdrs: number of headers to allocate
 * @dir: DMA direction the headers will be mapped with
 *
 * It returns the pointer to the array storing the DMA memory pointers.  On
 * error, a NULL pointer is returned.
 */
static struct rds_header **rds_dma_hdrs_alloc(struct rds_ib_device *dev,
					      dma_addr_t **dma_addrs, u32 num_hdrs,
					      enum dma_data_direction dir)
{
	struct rds_header **hdrs;
	dma_addr_t *hdr_daddrs;
	u32 i;

	hdrs = kvmalloc_node(sizeof(*hdrs) * num_hdrs, GFP_KERNEL,
			     ibdev_to_node(dev->dev));
	if (!hdrs)
		return NULL;

	hdr_daddrs = kvmalloc_node(sizeof(*hdr_daddrs) * num_hdrs, GFP_KERNEL,
				   ibdev_to_node(dev->dev));
	if (!hdr_daddrs) {
		kvfree(hdrs);
		return NULL;
	}

	for (i = 0; i < num_hdrs; i++) {
		hdrs[i] = rds_dma_hdr_alloc(dev->dev, &hdr_daddrs[i], dir);
		if (!hdrs[i]) {
			rds_dma_hdrs_free(dev, hdrs, hdr_daddrs, i, dir);
			return NULL;
		}
	}

	*dma_addrs = hdr_daddrs;
	return hdrs;
}


/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	unsigned long max_wrs;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * the completion queue and send queue.  This extra space is used for
	 * FRWR registration and invalidation work requests.
	 */
	fr_queue_space = RDS_IB_DEFAULT_FR_WR;

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
		rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
	if (ic->i_send_ring.w_nr != max_wrs)
		rds_ib_ring_resize(&ic->i_send_ring, max_wrs);

	max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
		rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
	if (ic->i_recv_ring.w_nr != max_wrs)
		rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
	cq_attr.comp_vector = ic->i_scq_vector;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto rds_ibdev_out;
	}

	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	cq_attr.comp_vector = ic->i_rcq_vector;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto send_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto recv_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto recv_cq_out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto recv_cq_out;
	}

	ic->i_send_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_send_hdrs_dma,
					     ic->i_send_ring.w_nr,
					     DMA_TO_DEVICE);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("DMA send hdrs alloc failed\n");
		goto qp_out;
	}

	ic->i_recv_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_recv_hdrs_dma,
					     ic->i_recv_ring.w_nr,
					     DMA_FROM_DEVICE);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("DMA recv hdrs alloc failed\n");
		goto send_hdrs_dma_out;
	}

	ic->i_ack = rds_dma_hdr_alloc(rds_ibdev->dev, &ic->i_ack_dma,
				      DMA_TO_DEVICE);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("DMA ack header alloc failed\n");
		goto recv_hdrs_dma_out;
	}

	ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work),
					      ic->i_send_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto ack_dma_out;
	}

	ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work),
					      ic->i_recv_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto sends_out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

	goto out;

sends_out:
	vfree(ic->i_sends);

ack_dma_out:
	rds_dma_hdr_free(rds_ibdev->dev, ic->i_ack, ic->i_ack_dma,
			 DMA_TO_DEVICE);
	ic->i_ack = NULL;

recv_hdrs_dma_out:
	rds_dma_hdrs_free(rds_ibdev, ic->i_recv_hdrs, ic->i_recv_hdrs_dma,
			  ic->i_recv_ring.w_nr, DMA_FROM_DEVICE);
	ic->i_recv_hdrs = NULL;
	ic->i_recv_hdrs_dma = NULL;

send_hdrs_dma_out:
	rds_dma_hdrs_free(rds_ibdev, ic->i_send_hdrs, ic->i_send_hdrs_dma,
			  ic->i_send_ring.w_nr, DMA_TO_DEVICE);
	ic->i_send_hdrs = NULL;
	ic->i_send_hdrs_dma = NULL;

qp_out:
	rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
	ib_destroy_cq(ic->i_recv_cq);
	ic->i_recv_cq = NULL;
send_cq_out:
	ib_destroy_cq(ic->i_send_cq);
	ic->i_send_cq = NULL;
rds_ibdev_out:
	rds_ib_remove_conn(rds_ibdev, conn);
out:
	rds_ib_dev_put(rds_ibdev);

	return ret;
}

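/* Inspect the private data of an incoming CM request and work out which
 * protocol version both sides speak.  Returns the negotiated version, or
 * 0 to reject the connection as incompatible.
 */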
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
{
	const union rds_ib_conn_priv *dp = event->param.conn.private_data;
	u8 data_len, major, minor;
	u32 version = 0;
	__be16 mask;
	u16 common;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we are handed a pretty large buffer without being told the
	 * original size.  The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version.  This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid.  RDS always has privdata. */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, rejecting\n");
		return 0;
	}

	if (isv6) {
		data_len = sizeof(struct rds6_ib_connect_private);
		major = dp->ricp_v6.dp_protocol_major;
		minor = dp->ricp_v6.dp_protocol_minor;
		mask = dp->ricp_v6.dp_protocol_minor_mask;
	} else {
		data_len = sizeof(struct rds_ib_connect_private);
		major = dp->ricp_v4.dp_protocol_major;
		minor = dp->ricp_v4.dp_protocol_minor;
		mask = dp->ricp_v4.dp_protocol_minor_mask;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < data_len || major == 0)
		return RDS_PROTOCOL_4_0;

	common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (major == 4 && common) {
		version = RDS_PROTOCOL_4_0;
		while ((common >>= 1) != 0)
			version++;
	} else if (RDS_PROTOCOL_COMPAT_VERSION ==
		   RDS_PROTOCOL(major, minor)) {
		version = RDS_PROTOCOL_COMPAT_VERSION;
	} else {
		if (isv6)
			printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
					   &dp->ricp_v6.dp_saddr, major, minor);
		else
			printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
					   &dp->ricp_v4.dp_saddr, major, minor);
	}
	return version;
}
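
/*
 * A worked example of the minor-version negotiation above (illustrative
 * only; the mask value is a made-up assumption, not taken from rds.h):
 *
 *	peer's minor mask:	0x0003	(peer speaks minors 0 and 1)
 *	common = 0x0003 & RDS_IB_SUPPORTED_PROTOCOLS;	// say 0x0003
 *	version = RDS_PROTOCOL_4_0;
 *	while ((common >>= 1) != 0)	// one iteration, for bit 1
 *		version++;		// -> RDS_PROTOCOL(4, 1)
 *
 * i.e. the position of the highest set bit in the common mask selects the
 * highest minor version both sides support.
 */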

#if IS_ENABLED(CONFIG_IPV6)
/* Given an IPv6 address, find the net_device which hosts that address and
 * return its index.  This is used by the rds_ib_cm_handle_connect() code to
 * find the interface index of where an incoming request comes from when
 * the request is using a link local address.
 *
 * Note one problem with this search.  It is possible for two interfaces to
 * have the same link local address.  Unfortunately, this cannot be solved
 * unless the underlying layer tells us the interface on which an incoming
 * RDMA connect request arrived.
 */
static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr)
{
	struct net_device *dev;
	int idx = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (ipv6_chk_addr(net, addr, dev, 1)) {
			idx = dev->ifindex;
			break;
		}
	}
	rcu_read_unlock();

	return idx;
}
#endif
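
/*
 * Usage sketch for __rds_find_ifindex() (illustrative only; the link-local
 * address is hypothetical):
 *
 *	struct in6_addr ll;	// e.g. fe80::1, hosted on ib0
 *	u32 idx;
 *
 *	idx = __rds_find_ifindex(&init_net, &ll);
 *	if (!idx)
 *		return -EOPNOTSUPP;	// no local interface owns fe80::1
 *
 * A link-local peer is unreachable without a scope (interface) index, which
 * is why the callers below bail out when 0 comes back; a nonzero index is
 * passed on to rds_conn_create() to bind the connection to that interface.
 */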

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_conn_priv_cmn *dp_cmn;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	const union rds_ib_conn_priv *dp;
	union rds_ib_conn_priv dp_rep;
	struct in6_addr s_mapped_addr;
	struct in6_addr d_mapped_addr;
	const struct in6_addr *saddr6;
	const struct in6_addr *daddr6;
	int destroy = 1;
	u32 ifindex = 0;
	u32 version;
	int err = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event, isv6);
	if (!version) {
		err = RDS_RDMA_REJ_INCOMPAT;
		goto out;
	}

	dp = event->param.conn.private_data;
	if (isv6) {
#if IS_ENABLED(CONFIG_IPV6)
		dp_cmn = &dp->ricp_v6.dp_cmn;
		saddr6 = &dp->ricp_v6.dp_saddr;
		daddr6 = &dp->ricp_v6.dp_daddr;
		/* If either address is link local, need to find the
		 * interface index in order to create a proper RDS
		 * connection.
		 */
		if (ipv6_addr_type(daddr6) & IPV6_ADDR_LINKLOCAL) {
			/* Using init_net for now .. */
			ifindex = __rds_find_ifindex(&init_net, daddr6);
			/* No index found... Need to bail out. */
			if (ifindex == 0) {
				err = -EOPNOTSUPP;
				goto out;
			}
		} else if (ipv6_addr_type(saddr6) & IPV6_ADDR_LINKLOCAL) {
			/* Use our address to find the correct index. */
			ifindex = __rds_find_ifindex(&init_net, daddr6);
			/* No index found... Need to bail out. */
			if (ifindex == 0) {
				err = -EOPNOTSUPP;
				goto out;
			}
		}
#else
		err = -EOPNOTSUPP;
		goto out;
#endif
	} else {
		dp_cmn = &dp->ricp_v4.dp_cmn;
		ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr);
		ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr);
		saddr6 = &s_mapped_addr;
		daddr6 = &d_mapped_addr;
	}

	rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
		 saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
		 RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);

	/* RDS/IB is not currently netns aware, thus init_net. */
	conn = rds_conn_create(&init_net, daddr6, saddr6,
			       &rds_ib_transport, dp_cmn->ricpc_dp_toss,
			       GFP_KERNEL, ifindex);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the previous connection
	 * still exists, e.g. in case of failover.  But as connections may
	 * be initiated simultaneously by both hosts, we have a random
	 * backoff mechanism - see the comment above rds_queue_reconnect().
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connection is up\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}
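
	/*
	 * Illustrative timeline of the race handled above, for two
	 * hypothetical hosts A and B connecting simultaneously:
	 *
	 *	A: path_connect()           DOWN -> CONNECTING
	 *	B: path_connect()           DOWN -> CONNECTING
	 *	A: handle_connect() from B: transition fails, state is
	 *	   CONNECTING               -> s_ib_connect_raced, wait
	 *
	 * The random backoff in rds_queue_reconnect() keeps the two sides
	 * from colliding forever, so one connection attempt eventually wins.
	 */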

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp_cmn->ricpc_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq),
				    NULL);
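
	/*
	 * Example with made-up numbers: if the peer advertises
	 * ricpc_ack_seq == 1000, every queued message with sequence
	 * number <= 1000 is dropped from the retransmit queue exactly
	 * as if an explicit ACK for 1000 had arrived, so a failed-over
	 * connection does not resend data the peer has already seen.
	 */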

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection; if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth, isv6);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	if (rdma_accept(cm_id, &conn_param))
		rds_ib_conn_error(conn, "rdma_accept failed\n");

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, &err, sizeof(int),
			    IB_CM_REJ_CONSUMER_DEFINED);
	return destroy;
}
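
/*
 * Note on the reject path above: the errno/reason is shipped to the peer as
 * IB_CM_REJ_CONSUMER_DEFINED private data.  A sketch of what the initiating
 * side can do with it (hypothetical handler snippet; the real dispatch
 * lives in the rdma_transport cm event handler):
 *
 *	case RDMA_CM_EVENT_REJECTED:
 *		err = (int *)event->param.conn.private_data;
 *		if (event->status == IB_CM_REJ_CONSUMER_DEFINED &&
 *		    *err == RDS_RDMA_REJ_INCOMPAT)
 *			// peer runs an incompatible RDS protocol; retry
 *			// the connect proposing the compat version.
 */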

int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	union rds_ib_conn_priv dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv4.1 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
				  conn->c_proposed_version,
				  UINT_MAX, UINT_MAX, isv6);
	ret = rdma_connect_locked(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
				  ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id.  We should certainly not do that as long as we
	 * still "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	ic->i_active_side = true;
	return ret;
}

int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct sockaddr_storage src, dest;
	rdma_cm_event_handler handler;
	struct rds_ib_connection *ic;
	int ret;

	ic = conn->c_transport_data;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
#if IS_ENABLED(CONFIG_IPV6)
	if (conn->c_isv6)
		handler = rds6_rdma_cm_event_handler;
	else
#endif
		handler = rds_rdma_cm_event_handler;
	ic->i_cm_id = rdma_create_id(&init_net, handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	if (ipv6_addr_v4mapped(&conn->c_faddr)) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&src;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
		sin->sin_port = 0;

		sin = (struct sockaddr_in *)&dest;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
		sin->sin_port = htons(RDS_PORT);
	} else {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&src;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = conn->c_laddr;
		sin6->sin6_port = 0;
		sin6->sin6_scope_id = conn->c_dev_if;

		sin6 = (struct sockaddr_in6 *)&dest;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = conn->c_faddr;
		sin6->sin6_port = htons(RDS_CM_PORT);
		sin6->sin6_scope_id = conn->c_dev_if;
	}
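
	/*
	 * Layout reminder for the v4-mapped branch above: an IPv4 address
	 * a.b.c.d travels in an in6_addr as ::ffff:a.b.c.d, i.e.
	 *
	 *	s6_addr32[0] = 0;
	 *	s6_addr32[1] = 0;
	 *	s6_addr32[2] = htonl(0x0000ffff);
	 *	s6_addr32[3] = <the IPv4 address, network byte order>;
	 *
	 * which is why sin_addr.s_addr is taken straight from
	 * s6_addr32[3] with no byte swapping.
	 */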

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is careful to clean up only the resources that were actually
 * built up, so that it can be called at any point during startup.
 * In fact it can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect races with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}

		/* kick off "flush_worker" for all pools in order to reap
		 * all FRMR registrations that are still marked "FRMR_IS_INUSE"
		 */
		rds_ib_flush_mrs();

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shut down new
		 * sends before getting here, so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_inuse_count) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		atomic_set(&ic->i_cq_quiesce, 1);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
			ib_destroy_cq(ic->i_send_cq);
		}

		if (ic->i_recv_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
			ib_destroy_cq(ic->i_recv_cq);
		}

		if (ic->rds_ibdev) {
			/* then free the resources that ib callbacks use */
			if (ic->i_send_hdrs) {
				rds_dma_hdrs_free(ic->rds_ibdev,
						  ic->i_send_hdrs,
						  ic->i_send_hdrs_dma,
						  ic->i_send_ring.w_nr,
						  DMA_TO_DEVICE);
				ic->i_send_hdrs = NULL;
				ic->i_send_hdrs_dma = NULL;
			}

			if (ic->i_recv_hdrs) {
				rds_dma_hdrs_free(ic->rds_ibdev,
						  ic->i_recv_hdrs,
						  ic->i_recv_hdrs_dma,
						  ic->i_recv_ring.w_nr,
						  DMA_FROM_DEVICE);
				ic->i_recv_hdrs = NULL;
				ic->i_recv_hdrs_dma = NULL;
			}

			if (ic->i_ack) {
				rds_dma_hdr_free(ic->rds_ibdev->dev, ic->i_ack,
						 ic->i_ack_dma, DMA_TO_DEVICE);
				ic->i_ack = NULL;
			}
		} else {
			WARN_ON(ic->i_send_hdrs);
			WARN_ON(ic->i_send_hdrs_dma);
			WARN_ON(ic->i_recv_hdrs);
			WARN_ON(ic->i_recv_hdrs_dma);
			WARN_ON(ic->i_ack);
		}

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;
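
	/*
	 * (Portability aside: on platforms without 64-bit atomics,
	 * i_ack_next is a plain u64 normally protected by ic->i_ack_lock,
	 * which is initialized in rds_ib_conn_alloc() below.  The plain
	 * store above is safe only because the connection is fully
	 * quiesced at this point - tasklets killed, CQs destroyed.)
	 */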

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	/* Re-init rings, but retain sizes. */
	rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
	rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	ic->i_active_side = false;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic, gfp);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);

	/*
	 * rds_ib_conn_path_shutdown() waits for these to be emptied, so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, 0);
	rds_ib_ring_init(&ic->i_recv_ring, 0);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection.  Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}
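
/*
 * Callers normally reach this through the rds_ib_conn_error() wrapper,
 * which prepends a log level and subsystem prefix - roughly (sketch from
 * ib.h; the exact prefix string may differ):
 *
 *	#define rds_ib_conn_error(conn, fmt...) \
 *		__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
 *
 * Note that the connection is dropped *before* the message is printed,
 * so the log line explains why the teardown that follows was initiated.
 */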