^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This software is available to you under a choice of one of two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * licenses. You may choose to be licensed under the terms of the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * General Public License (GPL) Version 2, available from the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * COPYING in the main directory of this source tree, or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * OpenIB.org BSD license below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Redistribution and use in source and binary forms, with or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * without modification, are permitted provided that the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * conditions are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * - Redistributions of source code must retain the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * copyright notice, this list of conditions and the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * - Redistributions in binary form must reproduce the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * copyright notice, this list of conditions and the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * disclaimer in the documentation and/or other materials
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <rdma/ib_mad.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <rdma/ib_smi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <rdma/ib_sa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <rdma/ib_cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/mlx4/cmd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <rdma/ib_pma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <net/ipv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/mlx4/driver.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include "mlx4_ib.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) MLX4_IB_VENDOR_CLASS1 = 0x9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) MLX4_IB_VENDOR_CLASS2 = 0xa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
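/*
 * Tunnel QP work request IDs encode the buffer index in the low 32 bits,
 * the tunnel QP index (0 for the SMI QP, 1 for the GSI QP) in bits 32-33,
 * and a "receive" flag in bit 34.
 */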
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define MLX4_TUN_SEND_WRID_SHIFT 34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define MLX4_TUN_QPN_SHIFT 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /* Port mgmt change event handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define NUM_IDX_IN_PKEY_TBL_BLK 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define GUID_TBL_BLK_NUM_ENTRIES 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct mlx4_mad_rcv_buf {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct ib_grh grh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) u8 payload[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct mlx4_mad_snd_buf {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) u8 payload[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct mlx4_tunnel_mad {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct ib_grh grh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) struct mlx4_ib_tunnel_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct ib_mad mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) struct mlx4_rcv_tunnel_mad {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) struct mlx4_rcv_tunnel_hdr hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct ib_grh grh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct ib_mad mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) int block, u32 change_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
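/* Generate a node GUID: the OpenIB OUI in the upper bytes, a random value in the low 32 bits. */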
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) __be64 mlx4_ib_gen_node_guid(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) return cpu_to_be64(NODE_GUID_HI | prandom_u32());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
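/*
 * Allocate a transaction ID for MADs generated by the demux context; the
 * top byte is set to 0xff, which is treated as the dom0/master slave ID
 * when the response is demultiplexed.
 */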
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) cpu_to_be64(0xff00000000000000LL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
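/*
 * Execute the MAD_IFC firmware command: copy the 256-byte MAD into a
 * command mailbox, optionally append work-completion and GRH information
 * (needed for key-check traps), and copy the 256-byte response back out.
 */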
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) int port, const struct ib_wc *in_wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) const void *in_mad, void *response_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) void *inbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) u32 in_modifier = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) u8 op_modifier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) if (IS_ERR(inmailbox))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return PTR_ERR(inmailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) inbox = inmailbox->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) if (IS_ERR(outmailbox)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) mlx4_free_cmd_mailbox(dev->dev, inmailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return PTR_ERR(outmailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) memcpy(inbox, in_mad, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * Key check traps can't be generated unless we have in_wc to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * tell us where to send the trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) op_modifier |= 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) op_modifier |= 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (mlx4_is_mfunc(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) op_modifier |= 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
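	/*
	 * A work completion was supplied: append the extended info block
	 * (local and remote QPNs, SL, path bits, P_Key index and GRH), flag
	 * its presence via op_modifier bit 0x4, and pass the source LID in
	 * the upper bits of the input modifier.
	 */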
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) if (in_wc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) __be32 my_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) u32 reserved1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) __be32 rqpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) u8 sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) u8 g_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) u16 reserved2[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) __be16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) u32 reserved3[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) u8 grh[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) } *ext_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) memset(inbox + 256, 0, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) ext_info = inbox + 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) ext_info->sl = in_wc->sl << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) ext_info->g_path = in_wc->dlid_path_bits |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) ext_info->pkey = cpu_to_be16(in_wc->pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (in_grh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) memcpy(ext_info->grh, in_grh, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) op_modifier |= 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) memcpy(response_mad, outmailbox->buf, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) mlx4_free_cmd_mailbox(dev->dev, inmailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) mlx4_free_cmd_mailbox(dev->dev, outmailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
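/* (Re)build the cached address handle used to send MADs to the subnet manager on this port. */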
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct ib_ah *new_ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) struct rdma_ah_attr ah_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (!dev->send_agent[port_num - 1][0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) memset(&ah_attr, 0, sizeof ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) rdma_ah_set_dlid(&ah_attr, lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) rdma_ah_set_sl(&ah_attr, sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) rdma_ah_set_port_num(&ah_attr, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) &ah_attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (IS_ERR(new_ah))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) spin_lock_irqsave(&dev->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if (dev->sm_ah[port_num - 1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) dev->sm_ah[port_num - 1] = new_ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) spin_unlock_irqrestore(&dev->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) u16 prev_lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct ib_port_info *pinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) u16 lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) __be16 *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) u32 bn, pkey_change_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) mad->mad_hdr.method == IB_MGMT_METHOD_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) switch (mad->mad_hdr.attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) case IB_SMP_ATTR_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) lid = be16_to_cpu(pinfo->lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) update_sm_ah(dev, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) be16_to_cpu(pinfo->sm_lid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) pinfo->neighbormtu_mastersmsl & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (pinfo->clientrereg_resv_subnetto & 0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) handle_client_rereg_event(dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (prev_lid != lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) handle_lid_change_event(dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) case IB_SMP_ATTR_PKEY_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (!mlx4_is_mfunc(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) mlx4_ib_dispatch_event(dev, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) IB_EVENT_PKEY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) /* at this point, we are running in the master.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Slaves do not receive SMPs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) pkey_change_bitmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = 0x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) if (be16_to_cpu(base[i]) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) pkey_change_bitmap |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) be16_to_cpu(base[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
			pr_debug("PKEY Change event: port=%d, block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if (pkey_change_bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) mlx4_ib_dispatch_event(dev, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) IB_EVENT_PKEY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (!dev->sriov.is_going_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) __propagate_pkey_ev(dev, port_num, bn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) pkey_change_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) case IB_SMP_ATTR_GUID_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) /* paravirtualized master's guid is guid 0 -- does not change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) if (!mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) mlx4_ib_dispatch_event(dev, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) if (mlx4_is_master(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) !dev->sriov.is_going_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) (u8 *)(&((struct ib_smp *)mad)->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) (u8 *)(&((struct ib_smp *)mad)->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) case IB_SMP_ATTR_SL_TO_VL_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) /* cache sl to vl mapping changes for use in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * filling QP1 LRH VL field when sending packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) if (!mlx4_is_slave(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) union sl2vl_tbl_to_u64 sl2vl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) int jj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) for (jj = 0; jj < 8; jj++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) pr_debug("port %u, sl2vl[%d] = %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) port_num, jj, sl2vl64.sl8[jj]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
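/*
 * Generate a P_Key change event toward every active slave whose virtual
 * P_Key table references one of the changed physical P_Key entries.
 */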
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) int block, u32 change_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) int i, ix, slave, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) int have_event = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) if (slave == mlx4_master_func_num(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (!mlx4_is_slave_active(dev->dev, slave))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) have_event = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (!(change_bitmap & (1 << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) for (ix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) [ix] == i + 32 * block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d, port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) have_event = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (have_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
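/*
 * Overwrite the NodeDescription in an outgoing SMP GetResp with the node
 * description configured for this device.
 */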
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) static void node_desc_override(struct ib_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct ib_mad *mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) IB_DEVICE_NODE_DESC_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
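/* Forward a trap MAD to the subnet manager using the cached SM address handle. */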
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct ib_mad_send_buf *send_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (agent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) IB_MGMT_MAD_DATA, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) IB_MGMT_BASE_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (IS_ERR(send_buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * We rely here on the fact that MLX QPs don't use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * address handle after the send is posted (this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * wrong following the IB spec strictly, but we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * it's OK for our devices).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) spin_lock_irqsave(&dev->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) memcpy(send_buf->mad, mad, sizeof *mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) if ((send_buf->ah = dev->sm_ah[port_num - 1]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) ret = ib_post_send_mad(send_buf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) spin_unlock_irqrestore(&dev->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) ib_free_send_mad(send_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) struct ib_sa_mad *sa_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) /* dispatch to different sa handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) case IB_SA_ATTR_MC_MEMBER_REC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
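/* Return the slave whose cached port GUID matches @guid, or -1 if none does. */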
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
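/*
 * Find a physical P_Key index that is visible to the given slave and
 * matches the requested P_Key. A full-membership match is preferred;
 * otherwise the first partial-membership match found is returned.
 */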
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) u8 port, u16 pkey, u16 *ix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) u16 slot_pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) if (slave == mlx4_master_func_num(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (slot_pkey & 0x8000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) *ix = (u16) pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) /* take first partial pkey index found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (partial_ix == 0xFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) partial_ix = pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (partial_ix < 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) *ix = (u16) partial_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
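/* Extract the source and destination GIDs from a RoCE L3 header (IPv4 or IPv6). */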
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) union ib_gid *dgid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) enum rdma_network_type net_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (version == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) net_type = RDMA_NETWORK_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) else if (version == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) net_type = RDMA_NETWORK_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) sgid, dgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
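/* The first two QPNs in a slave's proxy QP range (eight per slave) are its QP0 proxies. */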
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) return (qpn >= proxy_start && qpn <= proxy_start + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
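/*
 * Tunnel a MAD that arrived from the wire to the given slave: wrap it in a
 * tunnel header preserving the original work-completion metadata (source
 * QP, P_Key index, SL or VLAN, source MAC/LID) and post it on the
 * per-slave tunnel QP, addressed to the slave's proxy QP.
 */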
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) enum ib_qp_type dest_qpt, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct ib_grh *grh, struct ib_mad *mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) struct ib_sge list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) struct ib_ud_wr wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) const struct ib_send_wr *bad_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct mlx4_ib_demux_pv_ctx *tun_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) struct mlx4_ib_demux_pv_qp *tun_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct mlx4_rcv_tunnel_mad *tun_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) struct rdma_ah_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct ib_ah *ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct ib_qp *src_qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) unsigned tun_tx_ix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) int dqpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) u16 tun_pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) u16 cached_pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (dest_qpt > IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) tun_ctx = dev->sriov.demux[port-1].tun[slave];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* check if proxy qp created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (!dest_qpt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) tun_qp = &tun_ctx->qp[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) tun_qp = &tun_ctx->qp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) /* compute P_Key index to put in tunnel header for slave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (dest_qpt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) u16 pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) pr_debug("unable to get %s cached pkey for index %d, ret %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) wc->pkey_index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) cached_pkey, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) tun_pkey_ix = pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
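	/*
	 * Address the slave's proxy QP for this MAD: within the slave's
	 * proxy range, offsets 0-1 are the per-port QP0 proxies and
	 * offsets 2-3 are the per-port QP1 proxies.
	 */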
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) /* get tunnel tx data buf for slave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) src_qp = tun_qp->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
	/* Create an address handle. An empty one with just the port number is
	 * enough for the post send; the driver sets the force-loopback bit in post_send. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) memset(&attr, 0, sizeof attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) attr.type = rdma_ah_find_type(&dev->ib_dev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) rdma_ah_set_port_num(&attr, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) union ib_gid sgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) union ib_gid dgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) ah = rdma_create_ah(tun_ctx->pd, &attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (IS_ERR(ah))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
	/* Allocate a tunnel tx buffer slot only after the failure paths above have been passed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) spin_lock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) (MLX4_NUM_TUNNEL_BUFS - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) spin_unlock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (tun_qp->tx_ring[tun_tx_ix].ah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) tun_qp->tx_ring[tun_tx_ix].ah = ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) ib_dma_sync_single_for_cpu(&dev->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) tun_qp->tx_ring[tun_tx_ix].buf.map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) sizeof (struct mlx4_rcv_tunnel_mad),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) /* copy over to tunnel buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (grh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) memcpy(&tun_mad->grh, grh, sizeof *grh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) memcpy(&tun_mad->mad, mad, sizeof *mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /* adjust tunnel data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) u16 vlan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* VST mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (vlan != wc->vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* Packet vlan is not the VST-assigned vlan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * Drop the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /* Remove the vlan tag before forwarding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * the packet to the VF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) vlan = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) vlan = wc->vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) ib_dma_sync_single_for_device(&dev->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) tun_qp->tx_ring[tun_tx_ix].buf.map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) sizeof (struct mlx4_rcv_tunnel_mad),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) list.length = sizeof (struct mlx4_rcv_tunnel_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) list.lkey = tun_ctx->pd->local_dma_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) wr.ah = ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) wr.port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) wr.remote_qkey = IB_QP_SET_QKEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) wr.remote_qpn = dqpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) wr.wr.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) wr.wr.sg_list = &list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) wr.wr.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) wr.wr.opcode = IB_WR_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) wr.wr.send_flags = IB_SEND_SIGNALED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) spin_lock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) tun_qp->tx_ix_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) spin_unlock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) tun_qp->tx_ring[tun_tx_ix].ah = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) rdma_destroy_ah(ah, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
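/*
 * Demultiplex a MAD received from the wire: work out which slave it is
 * destined for (from the RoCE GID, the TID of a response, the GRH, or the
 * management class) and tunnel it to that slave.
 */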
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) struct ib_wc *wc, struct ib_grh *grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct ib_mad *mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) int err, other_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) int slave = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) u8 *slave_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) int is_eth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) is_eth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) is_eth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) union ib_gid dgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) union ib_gid sgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (!(wc->wc_flags & IB_WC_GRH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (err && mlx4_is_mf_bonded(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) other_port = (port == 1) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) port = other_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) slave, grh->dgid.raw, port, other_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) mlx4_ib_warn(ibdev, "failed matching grh\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (slave >= dev->dev->caps.sqp_demux) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) slave, dev->dev->caps.sqp_demux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) slave, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /* Initially assume that this mad is for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) slave = mlx4_master_func_num(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /* See if the slave id is encoded in a response mad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (mad->mad_hdr.method & 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) slave_id = (u8 *) &mad->mad_hdr.tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) *slave_id = 0; /* remap tid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* If a grh is present, we demux according to it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (wc->wc_flags & IB_WC_GRH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (grh->dgid.global.interface_id ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) grh->dgid.global.subnet_prefix == cpu_to_be64(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) slave = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) slave = mlx4_ib_find_real_gid(ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) grh->dgid.global.interface_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (slave < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) mlx4_ib_warn(ibdev, "failed matching grh\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* Class-specific handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) switch (mad->mad_hdr.mgmt_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) case IB_MGMT_CLASS_SUBN_LID_ROUTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /* 255 indicates the dom0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return -EPERM;
			/* for a VF, drop unsolicited MADs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) slave, mad->mad_hdr.mgmt_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) mad->mad_hdr.method);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) case IB_MGMT_CLASS_SUBN_ADM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) (struct ib_sa_mad *) mad))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) case IB_MGMT_CLASS_CM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) case IB_MGMT_CLASS_DEVICE_MGMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Drop unsupported classes for slaves in tunnel mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (slave != mlx4_master_func_num(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) pr_debug("dropping unsupported ingress mad from class:%d for slave:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) mad->mad_hdr.mgmt_class, slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* make sure a slave id that is still 255 (not resolved above) or otherwise out of range is rejected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (slave >= dev->dev->caps.sqp_demux) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) slave, dev->dev->caps.sqp_demux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) slave, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) const struct ib_wc *in_wc, const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) const struct ib_mad *in_mad, struct ib_mad *out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u16 slid, prev_lid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct ib_port_attr pattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) forward_trap(to_mdev(ibdev), port_num, in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * Don't process SMInfo queries -- the SMA can't handle them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) !ib_query_port(ibdev, port_num, &pattr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) prev_lid = ib_lid_cpu16(pattr.lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) err = mlx4_MAD_IFC(to_mdev(ibdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) MLX4_MAD_IFC_NET_VIEW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) port_num, in_wc, in_grh, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!out_mad->mad_hdr.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) smp_snoop(ibdev, port_num, in_mad, prev_lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* slaves get node desc from FW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!mlx4_is_slave(to_mdev(ibdev)->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) node_desc_override(ibdev, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* set return bit in status of directed route responses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* no response for trap repress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
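/*
 * Translate a raw mlx4 flow counter into the IB PMA PortCounters or
 * PortCountersExt layout.  PortXmitData/PortRcvData are defined in units of
 * 32-bit words, hence the byte counters are shifted right by two;
 * ASSIGN_32BIT_COUNTER() clamps the 64-bit hardware values into the 32-bit
 * fields of the basic attribute.
 */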
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static void edit_counter(struct mlx4_counter *cnt, void *counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) __be16 attr_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) switch (attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) case IB_PMA_PORT_COUNTERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct ib_pma_portcounters *pma_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) (struct ib_pma_portcounters *)counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) (be64_to_cpu(cnt->tx_bytes) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) (be64_to_cpu(cnt->rx_bytes) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) be64_to_cpu(cnt->tx_frames));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) be64_to_cpu(cnt->rx_frames));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) case IB_PMA_PORT_COUNTERS_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct ib_pma_portcounters_ext *pma_cnt_ext =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) (struct ib_pma_portcounters_ext *)counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pma_cnt_ext->port_xmit_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) pma_cnt_ext->port_rcv_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
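/*
 * Answer a PMA ClassPortInfo query locally: advertise extended (64-bit)
 * counter support and reply without involving the firmware.
 */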
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int iboe_process_mad_port_info(void *out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct ib_class_port_info cpi = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) memcpy(out_mad, &cpi, sizeof(cpi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) const struct ib_wc *in_wc, const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) const struct ib_mad *in_mad, struct ib_mad *out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct mlx4_counter counter_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct counter_index *tmp_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
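/*
 * A PMA MAD carries 40 reserved bytes between the common MAD header and the
 * attribute data (see struct ib_pma_mad), so the counter payload starts at
 * out_mad->data + 40.
 */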
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return iboe_process_mad_port_info((void *)(out_mad->data + 40));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) memset(&counter_stats, 0, sizeof(counter_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) mutex_lock(&dev->counters_table[port_num - 1].mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) list_for_each_entry(tmp_counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) &dev->counters_table[port_num - 1].counters_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) err = mlx4_get_counter_stats(dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) tmp_counter->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) &counter_stats, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) err = IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) stats_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) stats_avail = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) mutex_unlock(&dev->counters_table[port_num - 1].mutex);
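/*
 * counter_mode 0 is the basic packet/byte layout that edit_counter() can
 * translate; any other mode is reported as a failure rather than returning
 * untranslated data.
 */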
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (stats_avail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) switch (counter_stats.counter_mode & 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) edit_counter(&counter_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) (void *)(out_mad->data + 40),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) in_mad->mad_hdr.attr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) err = IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) const struct ib_wc *in_wc, const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) const struct ib_mad *in, struct ib_mad *out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) size_t *out_mad_size, u16 *out_mad_pkey_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* iboe_process_mad(), which uses the HCA flow counters to implement IB PMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * queries, should be called only by VFs and only for that specific purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (link == IB_LINK_LAYER_INFINIBAND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (mlx4_is_slave(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return iboe_process_mad(ibdev, mad_flags, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) in_wc, in_grh, in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (link == IB_LINK_LAYER_ETHERNET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) in_grh, in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static void send_handler(struct ib_mad_agent *agent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct ib_mad_send_wc *mad_send_wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (mad_send_wc->send_buf->context[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ib_free_send_mad(mad_send_wc->send_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
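/*
 * Register one send-only MAD agent (no receive handler) per IB port for each
 * of QP0 (SMI) and QP1 (GSI); forward_trap() uses these agents to relay traps
 * to the SM.  Ethernet (RoCE) ports have no SMI/GSI, so their slots stay NULL.
 */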
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct ib_mad_agent *agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int p, q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) enum rdma_link_layer ll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) for (p = 0; p < dev->num_ports; ++p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) for (q = 0; q <= 1; ++q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (ll == IB_LINK_LAYER_INFINIBAND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) q ? IB_QPT_GSI : IB_QPT_SMI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) NULL, 0, send_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (IS_ERR(agent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) ret = PTR_ERR(agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dev->send_agent[p][q] = agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) dev->send_agent[p][q] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) for (p = 0; p < dev->num_ports; ++p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) for (q = 0; q <= 1; ++q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (dev->send_agent[p][q])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ib_unregister_mad_agent(dev->send_agent[p][q]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct ib_mad_agent *agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int p, q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) for (p = 0; p < dev->num_ports; ++p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) for (q = 0; q <= 1; ++q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) agent = dev->send_agent[p][q];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (agent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dev->send_agent[p][q] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ib_unregister_mad_agent(agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (dev->sm_ah[p])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) rdma_destroy_ah(dev->sm_ah[p], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* re-configure the alias-guid and MCGs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (mlx4_is_master(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mlx4_ib_invalidate_all_guid_record(dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (!dev->sriov.is_going_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* Update the sl to vl table from inside client rereg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * only if in secure-host mode (snooping is not possible)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * and the sl-to-vl change event is not generated by FW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (!mlx4_is_slave(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /* already running from the work queue (mlx4_ib_event queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * mlx4_handle_port_mgmt_change_event, which calls this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * procedure), so call sl2vl_update directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) mlx4_ib_sl2vl_update(dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) mlx4_sched_ib_sl2vl_update_work(dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct mlx4_eqe *eqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) GET_MASK_FROM_EQE(eqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) u32 guid_tbl_blk_num, u32 change_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct ib_smp *in_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct ib_smp *out_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!in_mad || !out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
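/*
 * The EQE reports GUID changes in units of four GUIDInfo records, so the
 * block number is scaled by four; each record holds eight GUIDs and owns
 * eight bits of change_bitmap, which lets unchanged records be skipped.
 */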
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) guid_tbl_blk_num *= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) memset(in_mad, 0, sizeof *in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) memset(out_mad, 0, sizeof *out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) in_mad->base_version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) in_mad->class_version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) in_mad->method = IB_MGMT_METHOD_GET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (mlx4_MAD_IFC(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) port_num, NULL, NULL, in_mad, out_mad)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) (u8 *)(&((struct ib_smp *)out_mad)->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) (u8 *)(&((struct ib_smp *)out_mad)->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) kfree(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) kfree(out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void handle_port_mgmt_change_event(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct mlx4_ib_dev *dev = ew->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct mlx4_eqe *eqe = &(ew->ib_eqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) u8 port = eqe->event.port_mgmt_change.port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) u32 changed_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) u32 tbl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) u32 change_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) switch (eqe->subtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* Update the SM ah - this should be done before handling the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * changed attributes so that MADs can be sent to the SM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (changed_attr & MSTR_SM_CHANGE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) update_sm_ah(dev, port, lid, sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* Check if it is a lid change event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) handle_lid_change_event(dev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Generate GUID changed event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (mlx4_is_master(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) union ib_gid gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) gid.global.subnet_prefix =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) eqe->event.port_mgmt_change.params.port_info.gid_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) port, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) be64_to_cpu(gid.global.subnet_prefix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) be64_to_cpu(gid.global.subnet_prefix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* if master, notify all slaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) handle_client_rereg_event(dev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) propagate_pkey_ev(dev, port, eqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* paravirtualized master's guid is guid 0 -- does not change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (!mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* if master, notify relevant slaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) else if (!dev->sriov.is_going_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) change_bitmap = GET_MASK_FROM_EQE(eqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /* cache sl to vl mapping changes for use in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * filling QP1 LRH VL field when sending packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!mlx4_is_slave(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) union sl2vl_tbl_to_u64 sl2vl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int jj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (jj = 0; jj < 8; jj++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) sl2vl64.sl8[jj] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) pr_debug("port %u, sl2vl[%d] = %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) port, jj, sl2vl64.sl8[jj]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) pr_warn("Unsupported subtype 0x%x for Port Management Change event\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) eqe->subtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) kfree(ew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) enum ib_event_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct ib_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) event.device = &dev->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) event.element.port_num = port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) event.event = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
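/*
 * Completion handlers for the paravirt tunnel and wire CQs: each one simply
 * queues the context's work item on its workqueue, unless SR-IOV teardown is
 * in progress or the context is no longer active.
 */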
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) queue_work(ctx->wq, &ctx->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) queue_work(ctx->wi_wq, &ctx->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct mlx4_ib_demux_pv_qp *tun_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct ib_sge sg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct ib_recv_wr recv_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) const struct ib_recv_wr *bad_recv_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) sg_list.addr = tun_qp->ring[index].map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) sg_list.length = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) sg_list.lkey = ctx->pd->local_dma_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
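/*
 * The work request id packs the ring index together with a receive marker
 * and the proxy QP type, so the completion handler can locate the buffer and
 * tell the per-QP rings apart.
 */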
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) recv_wr.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) recv_wr.sg_list = &sg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) recv_wr.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) int slave, struct ib_sa_mad *sa_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* dispatch to different sa handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) case IB_SA_ATTR_MC_MEMBER_REC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
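/*
 * Send a MAD onto the real fabric on behalf of a slave through the master's
 * proxy QP0 (SMI) or QP1 (GSI): translate the slave's pkey index to the
 * physical one, build an address handle from the tunnelled attributes, copy
 * the MAD into a pre-mapped send buffer and post it.
 */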
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) enum ib_qp_type dest_qpt, u16 pkey_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct ib_sge list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct ib_ud_wr wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) const struct ib_send_wr *bad_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct mlx4_ib_demux_pv_ctx *sqp_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct mlx4_ib_demux_pv_qp *sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct mlx4_mad_snd_buf *sqp_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) struct ib_ah *ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct ib_qp *send_qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) unsigned wire_tx_ix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) u16 wire_pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) int src_qpnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) sqp_ctx = dev->sriov.sqps[port-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* check if proxy qp created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (dest_qpt == IB_QPT_SMI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) src_qpnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) sqp = &sqp_ctx->qp[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) src_qpnum = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) sqp = &sqp_ctx->qp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) send_qp = sqp->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!ah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ah->device = sqp_ctx->pd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) ah->pd = sqp_ctx->pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* create ah */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ret = mlx4_ib_create_ah_slave(ah, attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) rdma_ah_retrieve_grh(attr)->sgid_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) s_mac, vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
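/*
 * The send ring is a power-of-two circular buffer: head - tail gives the
 * number of outstanding sends, and the slot index is the new head masked by
 * MLX4_NUM_WIRE_BUFS - 1.
 */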
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) spin_lock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (sqp->tx_ix_head - sqp->tx_ix_tail >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) (MLX4_NUM_WIRE_BUFS - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) spin_unlock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) kfree(sqp->tx_ring[wire_tx_ix].ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) sqp->tx_ring[wire_tx_ix].ah = ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) ib_dma_sync_single_for_cpu(&dev->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) sqp->tx_ring[wire_tx_ix].buf.map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) sizeof (struct mlx4_mad_snd_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) memcpy(&sqp_mad->payload, mad, sizeof *mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ib_dma_sync_single_for_device(&dev->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) sqp->tx_ring[wire_tx_ix].buf.map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) sizeof (struct mlx4_mad_snd_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) list.length = sizeof (struct mlx4_mad_snd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) list.lkey = sqp_ctx->pd->local_dma_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) wr.ah = ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) wr.port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) wr.pkey_index = wire_pkey_ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) wr.remote_qkey = qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) wr.remote_qpn = remote_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) wr.wr.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) wr.wr.sg_list = &list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) wr.wr.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) wr.wr.opcode = IB_WR_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) wr.wr.send_flags = IB_SEND_SIGNALED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) spin_lock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) sqp->tx_ix_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) spin_unlock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) sqp->tx_ring[wire_tx_ix].ah = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) kfree(ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
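/*
 * GID index translation for slaves: on IB ports each slave owns exactly one
 * GID, at index == slave number; on RoCE ports a slave owns a block of GIDs
 * starting at a per-slave base index, so its sgid_index is offset by that
 * base.
 */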
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return mlx4_get_base_gid_ix(dev->dev, slave, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct rdma_ah_attr *ah_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) grh->sgid_index = slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct mlx4_ib_ah ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct rdma_ah_attr ah_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) u8 *slave_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) int slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) u16 vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) u8 qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) u8 *dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /* Get slave that sent this packet */
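/* The source QP must lie within the proxy SQP range, its low bit must match
 * this context's port, and bit 2 must be clear; each function owns a stride
 * of eight QPs in that range, which is how the sending slave is derived
 * below.
 */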
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) (wc->src_qp & 0x1) != ctx->port - 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) wc->src_qp & 0x4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (slave != ctx->slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: belongs to another slave\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) wc->src_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* Map transaction ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) sizeof (struct mlx4_tunnel_mad),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) switch (tunnel->mad.mad_hdr.method) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) case IB_MGMT_METHOD_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) case IB_MGMT_METHOD_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) case IB_MGMT_METHOD_REPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) case IB_SA_METHOD_GET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case IB_SA_METHOD_DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) case IB_SA_METHOD_GET_MULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) case IB_SA_METHOD_GET_TRACE_TBL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (*slave_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) mlx4_ib_warn(ctx->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) "egress mad has non-null tid msb:%d class:%d slave:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) *slave_id, tunnel->mad.mad_hdr.mgmt_class, slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *slave_id = slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /* nothing */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /* Class-specific handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) switch (tunnel->mad.mad_hdr.mgmt_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) case IB_MGMT_CLASS_SUBN_LID_ROUTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (slave != mlx4_master_func_num(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) case IB_MGMT_CLASS_SUBN_ADM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) (struct ib_sa_mad *) &tunnel->mad))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) case IB_MGMT_CLASS_CM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) (struct ib_mad *) &tunnel->mad))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) case IB_MGMT_CLASS_DEVICE_MGMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /* Drop unsupported classes for slaves in tunnel mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (slave != mlx4_master_func_num(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /* We are using standard ib_core services to send the MAD, so generate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * standard address handle by decoding the tunnelled mlx4_ah fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ah.ibah.device = ctx->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) port = mlx4_slave_convert_port(dev->dev, slave, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (port < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) mlx4_ib_query_ah(&ah.ibah, &ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) dmac = rdma_ah_retrieve_dmac(&ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) vlan_id = be16_to_cpu(tunnel->hdr.vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* if the slave has a default vlan, use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) &vlan_id, &qos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) rdma_ah_set_sl(&ah_attr, qos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) sts = mlx4_ib_send_to_wire(dev, slave, ctx->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) is_proxy_qp0(dev, wc->src_qp, slave) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) IB_QPT_SMI : IB_QPT_GSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) be16_to_cpu(tunnel->hdr.pkey_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) be32_to_cpu(tunnel->hdr.remote_qpn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) be32_to_cpu(tunnel->hdr.qkey),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) &ah_attr, wc->smac, vlan_id, &tunnel->mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (sts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) slave, sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
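/*
 * Allocate and DMA-map the receive and send buffer rings for one
 * para-virtualized QP (tunnel QP when is_tun, wire SMI/GSI QP otherwise).
 * On failure everything mapped so far is unwound and -ENOMEM is returned.
 */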
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) enum ib_qp_type qp_type, int is_tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct mlx4_ib_demux_pv_qp *tun_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) int rx_buf_size, tx_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (qp_type > IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) tun_qp = &ctx->qp[qp_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) tun_qp->ring = kcalloc(nmbr_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) sizeof(struct mlx4_ib_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (!tun_qp->ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) tun_qp->tx_ring = kcalloc(nmbr_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) sizeof (struct mlx4_ib_tun_tx_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (!tun_qp->tx_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) kfree(tun_qp->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) tun_qp->ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (is_tun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) rx_buf_size = sizeof (struct mlx4_tunnel_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) for (i = 0; i < nmbr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (!tun_qp->ring[i].addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) tun_qp->ring[i].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) rx_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) kfree(tun_qp->ring[i].addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) for (i = 0; i < nmbr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) tun_qp->tx_ring[i].buf.addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) kmalloc(tx_buf_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (!tun_qp->tx_ring[i].buf.addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) goto tx_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) tun_qp->tx_ring[i].buf.map =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) ib_dma_map_single(ctx->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) tun_qp->tx_ring[i].buf.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) tx_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (ib_dma_mapping_error(ctx->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) tun_qp->tx_ring[i].buf.map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) kfree(tun_qp->tx_ring[i].buf.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) goto tx_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) tun_qp->tx_ring[i].ah = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) spin_lock_init(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) tun_qp->tx_ix_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) tun_qp->tx_ix_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) tun_qp->proxy_qpt = qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) tx_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) while (i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) --i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) tx_buf_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) kfree(tun_qp->tx_ring[i].buf.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) i = nmbr_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) while (i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) --i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) rx_buf_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) kfree(tun_qp->ring[i].addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) kfree(tun_qp->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) tun_qp->tx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) kfree(tun_qp->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) tun_qp->ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
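/*
 * Undo mlx4_ib_alloc_pv_bufs(): unmap and free both rings, destroying any
 * address handle still attached to a send buffer.
 */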
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) enum ib_qp_type qp_type, int is_tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) struct mlx4_ib_demux_pv_qp *tun_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) int rx_buf_size, tx_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (qp_type > IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) tun_qp = &ctx->qp[qp_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (is_tun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) rx_buf_size = sizeof (struct mlx4_tunnel_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) for (i = 0; i < nmbr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) rx_buf_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) kfree(tun_qp->ring[i].addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) for (i = 0; i < nmbr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) tx_buf_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) kfree(tun_qp->tx_ring[i].buf.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (tun_qp->tx_ring[i].ah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) kfree(tun_qp->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) kfree(tun_qp->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
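/*
 * Completion worker for the tunnel QPs: MADs received from a slave are
 * pushed to the wire via mlx4_ib_multiplex_mad() and their receive buffers
 * reposted; send completions (and errors) release the address handle and
 * advance the send-ring tail.
 */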
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct mlx4_ib_demux_pv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct mlx4_ib_demux_pv_qp *tun_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (wc.status == IB_WC_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) switch (wc.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) case IB_WC_RECV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) mlx4_ib_multiplex_mad(ctx, &wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) (MLX4_NUM_TUNNEL_BUFS - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) pr_err("Failed reposting tunnel "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) "buf:%lld\n", wc.wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case IB_WC_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) spin_lock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) tun_qp->tx_ix_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) spin_unlock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) pr_debug("mlx4_ib: completion error in tunnel: %d."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) " status = %d, wrid = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ctx->slave, wc.status, wc.wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) spin_lock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) tun_qp->tx_ix_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) spin_unlock(&tun_qp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* It's worse than that! He's dead, Jim! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) pr_err("Fatal error (%d) on a MAD QP on port %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) event->event, sqp->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
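/*
 * Create the QP backing a PV context: a UD tunnel QP when create_tun is set,
 * otherwise a special (SMI/GSI) QP for the master.  The QP is moved through
 * INIT, RTR and RTS and its receive ring is pre-posted.
 */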
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) enum ib_qp_type qp_type, int create_tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) struct mlx4_ib_demux_pv_qp *tun_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) struct ib_qp_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) int qp_attr_mask_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (qp_type > IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) tun_qp = &ctx->qp[qp_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) memset(&qp_init_attr, 0, sizeof qp_init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) qp_init_attr.init_attr.send_cq = ctx->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) qp_init_attr.init_attr.recv_cq = ctx->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) qp_init_attr.init_attr.cap.max_send_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) qp_init_attr.init_attr.cap.max_recv_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (create_tun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) qp_init_attr.init_attr.qp_type = IB_QPT_UD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) qp_init_attr.port = ctx->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) qp_init_attr.slave = ctx->slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) qp_init_attr.proxy_qp_type = qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) IB_QP_QKEY | IB_QP_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) qp_init_attr.init_attr.qp_type = qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) qp_init_attr.init_attr.port_num = ctx->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) qp_init_attr.init_attr.qp_context = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (IS_ERR(tun_qp->qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) ret = PTR_ERR(tun_qp->qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) tun_qp->qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) pr_err("Couldn't create %s QP (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) create_tun ? "tunnel" : "special", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) memset(&attr, 0, sizeof attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) attr.qp_state = IB_QPS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (create_tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) ctx->port, IB_DEFAULT_PKEY_FULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) &attr.pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (ret || !create_tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) attr.pkey_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) attr.qkey = IB_QP1_QKEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) attr.port_num = ctx->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) pr_err("Couldn't change %s qp state to INIT (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) create_tun ? "tunnel" : "special", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) goto err_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) attr.qp_state = IB_QPS_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) pr_err("Couldn't change %s qp state to RTR (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) create_tun ? "tunnel" : "special", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) goto err_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) attr.qp_state = IB_QPS_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) attr.sq_psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) pr_err("Couldn't change %s qp state to RTS (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) create_tun ? "tunnel" : "special", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) goto err_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) for (i = 0; i < nmbr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) pr_err("mlx4_ib_post_pv_qp_buf error"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) " (err = %d, i = %d)\n", ret, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) goto err_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) err_qp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) ib_destroy_qp(tun_qp->qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) tun_qp->qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * IB MAD completion worker for the real SQPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) struct mlx4_ib_demux_pv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct mlx4_ib_demux_pv_qp *sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct ib_grh *grh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) struct ib_mad *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (wc.status == IB_WC_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) switch (wc.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) case IB_WC_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) kfree(sqp->tx_ring[wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) (MLX4_NUM_WIRE_BUFS - 1)].ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) spin_lock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) sqp->tx_ix_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) spin_unlock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) case IB_WC_RECV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) (sqp->ring[wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) (MLX4_NUM_WIRE_BUFS - 1)].addr))->payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) grh = &(((struct mlx4_mad_rcv_buf *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) (sqp->ring[wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) (MLX4_NUM_WIRE_BUFS - 1)].addr))->grh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) (MLX4_NUM_WIRE_BUFS - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) pr_err("Failed reposting SQP "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) "buf:%lld\n", wc.wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) pr_debug("mlx4_ib: completion error on sqp: %d."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) " status = %d, wrid = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) ctx->slave, wc.status, wc.wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) kfree(sqp->tx_ring[wc.wr_id &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) (MLX4_NUM_WIRE_BUFS - 1)].ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) spin_lock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) sqp->tx_ix_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) spin_unlock(&sqp->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
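/*
 * Allocate an empty per-slave PV demux context and record the device, port
 * and slave it belongs to.
 */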
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) struct mlx4_ib_demux_pv_ctx **ret_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) struct mlx4_ib_demux_pv_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) *ret_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) ctx->ib_dev = &dev->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) ctx->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) ctx->slave = slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) *ret_ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (dev->sriov.demux[port - 1].tun[slave]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) kfree(dev->sriov.demux[port - 1].tun[slave]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) dev->sriov.demux[port - 1].tun[slave] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
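/*
 * Bring a PV context up: allocate the buffer rings, CQ and PD, create QP0
 * (IB link layer only) and QP1, select the tunnel or wire completion worker
 * and arm the CQ.  On success the context enters DEMUX_PV_STATE_ACTIVE.
 */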
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) int ret, cq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) struct ib_cq_init_attr cq_attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (ctx->state != DEMUX_PV_STATE_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ctx->state = DEMUX_PV_STATE_STARTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) /* have QP0 only if link layer is IB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (rdma_port_get_link_layer(ibdev, ctx->port) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) IB_LINK_LAYER_INFINIBAND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) ctx->has_smi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (ctx->has_smi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) goto err_out_qp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) cq_size = 2 * nmbr_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (ctx->has_smi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) cq_size *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) cq_attr.cqe = cq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) ctx->cq = ib_create_cq(ctx->ib_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) NULL, ctx, &cq_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (IS_ERR(ctx->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) ret = PTR_ERR(ctx->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) pr_err("Couldn't create tunnel CQ (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) goto err_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (IS_ERR(ctx->pd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) ret = PTR_ERR(ctx->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) pr_err("Couldn't create tunnel PD (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) goto err_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (ctx->has_smi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) pr_err("Couldn't create %s QP0 (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) create_tun ? "tunnel for" : "", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) goto err_pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) pr_err("Couldn't create %s QP1 (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) create_tun ? "tunnel for" : "", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) goto err_qp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (create_tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) pr_err("Couldn't arm tunnel cq (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) goto err_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) ctx->state = DEMUX_PV_STATE_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) err_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ctx->wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) ib_destroy_qp(ctx->qp[1].qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) ctx->qp[1].qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) err_qp0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (ctx->has_smi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ib_destroy_qp(ctx->qp[0].qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) ctx->qp[0].qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) err_pd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) ib_dealloc_pd(ctx->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) ctx->pd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) err_cq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) ib_destroy_cq(ctx->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) ctx->cq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) err_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) err_out_qp0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (ctx->has_smi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) ctx->state = DEMUX_PV_STATE_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
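/*
 * Tear down everything create_pv_resources() set up, optionally flushing the
 * context's workqueue first.
 */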
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) struct mlx4_ib_demux_pv_ctx *ctx, int flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (ctx->state > DEMUX_PV_STATE_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ctx->state = DEMUX_PV_STATE_DOWNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) flush_workqueue(ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (ctx->has_smi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) ib_destroy_qp(ctx->qp[0].qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) ctx->qp[0].qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) ib_destroy_qp(ctx->qp[1].qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) ctx->qp[1].qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) ib_dealloc_pd(ctx->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ctx->pd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) ib_destroy_cq(ctx->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ctx->cq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) ctx->state = DEMUX_PV_STATE_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
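/*
 * Create (do_init) or destroy the tunnel QP resources of one slave on one
 * port; for the master this also covers the real special QP resources.
 */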
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) int port, int do_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (!do_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) /* for master, destroy real sqp resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (slave == mlx4_master_func_num(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) destroy_pv_resources(dev, slave, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) dev->sriov.sqps[port - 1], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) /* destroy the tunnel qp resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) destroy_pv_resources(dev, slave, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) dev->sriov.demux[port - 1].tun[slave], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /* create the tunnel qp resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) dev->sriov.demux[port - 1].tun[slave]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) /* for master, create the real sqp resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (!ret && slave == mlx4_master_func_num(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) dev->sriov.sqps[port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
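/* Work handler wrapping mlx4_ib_tunnels_update(); frees its work descriptor. */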
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) void mlx4_ib_tunnels_update_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct mlx4_ib_demux_work *dmxw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) dmxw = container_of(work, struct mlx4_ib_demux_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) dmxw->do_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) kfree(dmxw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
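/*
 * Allocate the per-port demux context: the array of per-function PV contexts
 * (populated only for functions active on this port), the MCG
 * para-virtualization state and the three ordered workqueues (tunnel, wire
 * and up/down).
 */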
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct mlx4_ib_demux_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) char name[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (!ctx->tun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ctx->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) ctx->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ctx->ib_dev = &dev->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) for (i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) i < min(dev->dev->caps.sqp_demux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) (u16)(dev->dev->persist->num_vfs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct mlx4_active_ports actv_ports =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) mlx4_get_active_ports(dev->dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (!test_bit(port - 1, actv_ports.ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) goto err_mcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ret = mlx4_ib_mcg_port_init(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) pr_err("Failed initializing mcg para-virt (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) goto err_mcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) snprintf(name, sizeof(name), "mlx4_ibt%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (!ctx->wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) pr_err("Failed to create tunnelling WQ for port %d\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) goto err_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!ctx->wi_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) pr_err("Failed to create wire WQ for port %d\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) goto err_wiwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) snprintf(name, sizeof(name), "mlx4_ibud%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (!ctx->ud_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) pr_err("Failed to create up/down WQ for port %d\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) goto err_udwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) err_udwq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) destroy_workqueue(ctx->wi_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) ctx->wi_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) err_wiwq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) destroy_workqueue(ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) ctx->wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) err_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) mlx4_ib_mcg_port_cleanup(ctx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) err_mcg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) for (i = 0; i < dev->dev->caps.sqp_demux; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) free_pv_object(dev, i, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) kfree(ctx->tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) ctx->tun = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
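/*
 * Tear down a real special-QP context: flush its workqueue, destroy QP0 (if
 * present) and QP1, free the wire buffers and release the PD and CQ.
 */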
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) flush_workqueue(sqp_ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (sqp_ctx->has_smi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ib_destroy_qp(sqp_ctx->qp[0].qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) sqp_ctx->qp[0].qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) ib_destroy_qp(sqp_ctx->qp[1].qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) sqp_ctx->qp[1].qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) ib_dealloc_pd(sqp_ctx->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) sqp_ctx->pd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) ib_destroy_cq(sqp_ctx->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) sqp_ctx->cq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) sqp_ctx->state = DEMUX_PV_STATE_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
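/*
 * Free a per-port demux context: mark every per-function context as going
 * down, flush the workqueues, destroy the PV resources and finally the
 * workqueues themselves.
 */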
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) mlx4_ib_mcg_port_cleanup(ctx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (!ctx->tun[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) flush_workqueue(ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) flush_workqueue(ctx->wi_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) free_pv_object(dev, i, ctx->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) kfree(ctx->tun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) destroy_workqueue(ctx->ud_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) destroy_workqueue(ctx->wi_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) destroy_workqueue(ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
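/* On the master, bring up or tear down the tunnel QPs on every port. */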
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (!mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /* initialize or tear down tunnel QPs for the master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) for (i = 0; i < dev->dev->caps.num_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
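/*
 * Initialize SR-IOV para-virtualization support: CM para-virtualization for
 * all functions; on the master also slave node GUIDs, the alias GUID
 * service, sysfs and a per-port demux context plus special QP placeholder.
 * Slaves simply operate in QP1 tunnel mode.
 */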
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (!mlx4_is_mfunc(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) dev->sriov.is_going_down = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) spin_lock_init(&dev->sriov.going_down_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) mlx4_ib_cm_paravirt_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (mlx4_is_slave(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (i == mlx4_master_func_num(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) err = mlx4_ib_init_alias_guid_service(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) goto paravirt_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) err = mlx4_ib_device_register_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) goto sysfs_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) dev->dev->caps.sqp_demux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) for (i = 0; i < dev->num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) union ib_gid gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) goto demux_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) atomic64_set(&dev->sriov.demux[i].subnet_prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) be64_to_cpu(gid.global.subnet_prefix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) &dev->sriov.sqps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) goto demux_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) goto free_pv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) mlx4_ib_master_tunnels(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
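	/*
	 * Error unwind: release the per-port objects already allocated,
	 * then the sysfs entries, the alias GUID service and the CM
	 * paravirtualization state, in reverse order of initialization.
	 */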
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) free_pv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) demux_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) mlx4_ib_device_unregister_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) sysfs_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) mlx4_ib_destroy_alias_guid_service(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) paravirt_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) mlx4_ib_cm_paravirt_clean(dev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
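/*
 * Tear down SR-IOV / multi-function support.  The going-down flag is set
 * under going_down_lock before anything is released; on the master, the
 * per-port UD workqueues are then flushed and the special-QP and demux
 * contexts, the CM paravirtualization state, the alias GUID service and
 * the sysfs entries are freed.
 */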
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (!mlx4_is_mfunc(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) dev->sriov.is_going_down = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
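	/*
	 * Only the master owns the per-port demux/special-QP contexts, the
	 * alias GUID service and the SR-IOV sysfs entries.
	 */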
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (mlx4_is_master(dev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) for (i = 0; i < dev->num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) flush_workqueue(dev->sriov.demux[i].ud_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) kfree(dev->sriov.sqps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) dev->sriov.sqps[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) mlx4_ib_cm_paravirt_clean(dev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) mlx4_ib_destroy_alias_guid_service(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) mlx4_ib_device_unregister_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }