/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;

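/* Prepare a SubnGet() MAD for querying the subnet management agent. */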
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

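/*
 * Device-managed flow steering (DMFS) is usable only if every active
 * port type is covered by the matching firmware capability, and never
 * for IB ports in a multifunction (SR-IOV) environment.
 */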
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

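/*
 * Return the netdev (with a reference held) that backs an Ethernet
 * port. When the two ports are bonded, prefer the bond's currently
 * active slave so traffic follows the active link.
 */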
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

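/*
 * Push the whole cached GID table to firmware via SET_PORT for devices
 * that only speak RoCE v1. Under bonding the same table is mirrored to
 * port 2 as well.
 */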
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

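/*
 * As above, but for devices supporting both RoCE v1 and v2: each table
 * entry additionally carries a version field, and RoCE v2 GIDs that
 * are not IPv4-mapped are flagged in the type field.
 */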
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

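/* Dispatch the GID-table update according to the device's RoCE capabilities. */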
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

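/* Zero a cached GID entry and release its context; caller holds iboe->lock. */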
static void free_gid_entry(struct gid_entry *entry)
{
	memset(&entry->gid, 0, sizeof(entry->gid));
	kfree(entry->ctx);
	entry->ctx = NULL;
}

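/*
 * Add a GID to the per-port software cache and, when a new slot is
 * consumed, write the updated table to hardware. A matching existing
 * entry is reference-counted rather than duplicated. The table is
 * copied under iboe->lock and the firmware command is issued after the
 * lock is dropped, since the command may sleep.
 */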
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;
	u16 vlan_id = 0xffff;
	u8 mac[ETH_ALEN];

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
	if (ret)
		return ret;
	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type &&
		    port_gid_table->gids[i].vlan_id == vlan_id) {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid,
				       &attr->gid, sizeof(attr->gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].vlan_id = vlan_id;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		if (ret) {
			spin_lock_bh(&iboe->lock);
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
			spin_unlock_bh(&iboe->lock);
		}
		kfree(gids);
	}

	return ret;
}

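/*
 * Drop one reference on a cached GID entry; when the last reference
 * goes away, clear the slot and push the updated table to hardware,
 * again copying the table under the lock and issuing the firmware
 * command outside it.
 */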
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			free_gid_entry(&port_gid_table->gids[real_index]);
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}
	return ret;
}

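/*
 * Translate a GID index from the rdma core's cache into the index the
 * hardware GID table actually uses; on ports without a RoCE GID table
 * the two are identical.
 */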
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u8 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

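/*
 * Fill in device attributes. Generic limits come from firmware
 * capabilities; the node-info fields (vendor id, hw revision, system
 * image GUID) are fetched with a NodeInfo MAD. Optional fields of the
 * extended uverbs response are reported only when the caller's buffer
 * is large enough, tracked via resp.response_length.
 */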
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

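/*
 * Query port attributes for an InfiniBand link by sending PortInfo
 * (and, if needed, ExtendedPortInfo) MADs. With netw_view set in a
 * multifunction environment, the network (SM) view is reported rather
 * than the host view.
 */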
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}

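/*
 * Query port attributes for an Ethernet (RoCE) link. Width and speed
 * are derived from QUERY_PORT, while state and MTU are taken from the
 * associated netdev (the bond master when the ports are bonded).
 */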
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static int eth_link_query_port(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct ib_port_attr *props)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct mlx4_ib_dev *mdev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct mlx4_ib_iboe *iboe = &mdev->iboe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) enum ib_mtu tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct mlx4_cmd_mailbox *mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) int is_bonded = mlx4_is_bonded(mdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (IS_ERR(mailbox))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return PTR_ERR(mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) MLX4_CMD_WRAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) IB_WIDTH_4X : IB_WIDTH_1X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) IB_SPEED_FDR : IB_SPEED_QDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) props->port_cap_flags = IB_PORT_CM_SUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) props->ip_gids = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) props->max_msg_sz = mdev->dev->caps.max_msg_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (mdev->dev->caps.pkey_table_len[port])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) props->pkey_tbl_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) props->max_mtu = IB_MTU_4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) props->max_vl_num = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) props->state = IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) props->phys_state = state_to_phys_state(props->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) props->active_mtu = IB_MTU_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) spin_lock_bh(&iboe->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ndev = iboe->netdevs[port - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (ndev && is_bonded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) rcu_read_lock(); /* required to get upper dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ndev = netdev_master_upper_dev_get_rcu(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) tmp = iboe_get_mtu(ndev->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) IB_PORT_ACTIVE : IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) props->phys_state = state_to_phys_state(props->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) spin_unlock_bh(&iboe->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) mlx4_free_cmd_mailbox(mdev->dev, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
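/*
 * Dispatch the port query to the link-layer specific helper: MAD-based
 * for IB ports, QUERY_PORT/netdev-based for Ethernet. netw_view
 * selects the network (SM) view rather than the host view on
 * multi-function devices.
 */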
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct ib_port_attr *props, int netw_view)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	/* props is zeroed by the caller; avoid zeroing it again here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ib_link_query_port(ibdev, port, props, netw_view) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) eth_link_query_port(ibdev, port, props);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct ib_port_attr *props)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* returns host view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return __mlx4_ib_query_port(ibdev, port, props, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
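/*
 * Build the GID from two MADs: bytes 8..15 of PortInfo supply the
 * subnet prefix, and GuidInfo supplies the GUID (8 GUIDs per MAD
 * block). In host view on a multi-function device, only index 0 is
 * exposed; any higher index returns the null GUID.
 */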
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) union ib_gid *gid, int netw_view)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct ib_smp *in_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct ib_smp *out_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int clear = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!in_mad || !out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) init_query_mad(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) in_mad->attr_mod = cpu_to_be32(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (mlx4_is_mfunc(dev->dev) && netw_view)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) memcpy(gid->raw, out_mad->data + 8, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (mlx4_is_mfunc(dev->dev) && !netw_view) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* For any index > 0, return the null guid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) clear = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) init_query_mad(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) in_mad->attr_mod = cpu_to_be32(index / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) NULL, NULL, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) memset(gid->raw + 8, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) kfree(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) kfree(out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) union ib_gid *gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (rdma_protocol_ib(ibdev, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
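/*
 * Read the port's SL-to-VL mapping table via a MAD and pack its eight
 * data bytes into a single u64. Slaves cannot issue the query and
 * simply report an all-zero mapping.
 */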
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) union sl2vl_tbl_to_u64 sl2vl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct ib_smp *in_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct ib_smp *out_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) int jj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) *sl2vl_tbl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!in_mad || !out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) init_query_mad(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) in_mad->attr_mod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) for (jj = 0; jj < 8; jj++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) *sl2vl_tbl = sl2vl64.sl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) kfree(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) kfree(out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
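/*
 * Cache the default SL-to-VL mapping of every IB port at init time;
 * Ethernet ports are skipped, and a failed query falls back to an
 * all-zero table.
 */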
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) u64 sl2vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) i, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) sl2vl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
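/*
 * Fetch a single P_Key through a PkeyTable MAD; each MAD block holds
 * 32 entries, so attr_mod selects block index / 32 and the entry is
 * picked out of the returned block.
 */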
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) u16 *pkey, int netw_view)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct ib_smp *in_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct ib_smp *out_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) 	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!in_mad || !out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) init_query_mad(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) in_mad->attr_mod = cpu_to_be32(index / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) kfree(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) kfree(out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
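/*
 * Only IB_DEVICE_MODIFY_NODE_DESC is supported here. The new node
 * description is copied under sm_lock, then handed to the FW on a
 * best-effort basis (see the SET_NODE comment below).
 */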
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct ib_device_modify *props)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct mlx4_cmd_mailbox *mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (mlx4_is_slave(to_mdev(ibdev)->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * If possible, pass the node descriptor to the FW so that it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * generate a trap 144. If the command fails, just ignore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (IS_ERR(mailbox))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
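/*
 * Issue SET_PORT for an IB port. The mailbox layout differs between
 * the old and new port command interfaces, but both carry the
 * qkey-violation counter reset bit and the new capability mask.
 */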
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) u32 cap_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct mlx4_cmd_mailbox *mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (IS_ERR(mailbox))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return PTR_ERR(mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) MLX4_CMD_WRAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) mlx4_free_cmd_mailbox(dev->dev, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct ib_port_modify *props)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct mlx4_ib_dev *mdev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct ib_port_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) u32 cap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* Return OK if this is RoCE. The CM calls ib_modify_port() regardless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	 * of whether the port link layer is ETH or IB. For ETH ports, qkey
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	 * violations and port capabilities are not meaningful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (is_eth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) mutex_lock(&mdev->cap_mask_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) err = ib_query_port(ibdev, port, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ~props->clr_port_cap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) err = mlx4_ib_SET_PORT(mdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) !!(mask & IB_PORT_RESET_QKEY_CNTR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
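/*
 * Set up a user context: the response layout is chosen by the uverbs
 * ABI version (the v3 layout predates the dev_caps/cqe_size fields),
 * a UAR is allocated for the context, and the device limits are
 * copied back to userspace.
 */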
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct ib_device *ibdev = uctx->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct mlx4_ib_ucontext *context = to_mucontext(uctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct mlx4_ib_alloc_ucontext_resp resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!dev->ib_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (ibdev->ops.uverbs_abi_ver ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) resp_v3.qp_tab_size = dev->dev->caps.num_qps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) resp.dev_caps = dev->dev->caps.userspace_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) resp.qp_tab_size = dev->dev->caps.num_qps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) resp.bf_reg_size = dev->dev->caps.bf_reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) resp.cqe_size = dev->dev->caps.cqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) INIT_LIST_HEAD(&context->db_page_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mutex_init(&context->db_page_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) INIT_LIST_HEAD(&context->wqn_ranges_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) mutex_init(&context->wqn_ranges_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) err = ib_copy_to_udata(udata, &resp, sizeof(resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
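/*
 * mmap dispatch by page offset: 0 maps the context's UAR page
 * (non-cached), 1 maps the blue-flame registers (write-combining,
 * only if the device has a blue-flame area), and 3 maps the internal
 * hardware clock page. Any other offset (including 2) returns
 * -EINVAL.
 */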
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct mlx4_ib_dev *dev = to_mdev(context->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) switch (vma->vm_pgoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return rdma_user_mmap_io(context, vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) to_mucontext(context)->uar.pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) pgprot_noncached(vma->vm_page_prot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (dev->dev->caps.bf_reg_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return rdma_user_mmap_io(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) context, vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) to_mucontext(context)->uar.pfn +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dev->dev->caps.num_uars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) case 3: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct mlx4_clock_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return rdma_user_mmap_io(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) context, vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) (pci_resource_start(dev->dev->persist->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) params.bar) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) params.offset) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct mlx4_ib_pd *pd = to_mpd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct ib_device *ibdev = ibpd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
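/*
 * An XRC domain is created together with an internal PD and a minimal
 * one-CQE CQ, presumably so objects created against the XRCD always
 * have a valid PD/CQ to reference.
 */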
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct ib_cq_init_attr cq_attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (IS_ERR(xrcd->pd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) err = PTR_ERR(xrcd->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) cq_attr.cqe = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (IS_ERR(xrcd->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) err = PTR_ERR(xrcd->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ib_dealloc_pd(xrcd->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ib_destroy_cq(to_mxrcd(xrcd)->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ib_dealloc_pd(to_mxrcd(xrcd)->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
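/*
 * Record a multicast GID on the QP's gid_list; port and 'added' are
 * only filled in when the port has a live netdev (as reported by
 * mlx4_ib_add_mc()).
 */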
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct mlx4_ib_qp *mqp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct mlx4_ib_gid_entry *ge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	ge = kzalloc(sizeof(*ge), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (!ge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) ge->gid = *gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (mlx4_ib_add_mc(mdev, mqp, gid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ge->port = mqp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ge->added = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) mutex_lock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) list_add_tail(&ge->list, &mqp->gid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) mutex_unlock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
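/*
 * Tear down the per-device counter list, freeing only those counter
 * indexes that were actually allocated by this driver.
 */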
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct mlx4_ib_counters *ctr_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct counter_index *counter, *tmp_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mutex_lock(&ctr_table->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (counter->allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) mlx4_counter_free(ibdev->dev, counter->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) list_del(&counter->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) kfree(counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) mutex_unlock(&ctr_table->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
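/*
 * Report whether the QP's port currently has a RoCE net_device
 * attached. The reference is taken and dropped immediately; only the
 * existence check matters to the caller.
 */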
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) union ib_gid *gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!mqp->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) spin_lock_bh(&mdev->iboe.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) ndev = mdev->iboe.netdevs[mqp->port - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dev_hold(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) spin_unlock_bh(&mdev->iboe.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dev_put(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct mlx4_ib_steering {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct mlx4_flow_reg_id reg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) union ib_gid gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) #define LAST_ETH_FIELD vlan_tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) #define LAST_IB_FIELD sl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) #define LAST_IPV4_FIELD dst_ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) #define LAST_TCP_UDP_FIELD src_port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /* 'field' is the last supported field; non-zero if any later field is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) #define FIELDS_NOT_SUPPORTED(filter, field)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) memchr_inv((void *)&filter.field +\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) sizeof(filter.field), 0,\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) sizeof(filter) -\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) offsetof(typeof(filter), field) -\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) sizeof(filter.field))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
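/*
 * Translate one ib_flow_spec into the corresponding mlx4 hardware
 * rule segment. Returns the segment size in bytes on success, so the
 * caller can advance through the rule buffer; the hardware 'size'
 * field itself is in dwords (hence the >> 2).
 */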
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static int parse_flow_attr(struct mlx4_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) u32 qp_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) union ib_flow_spec *ib_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct _rule_hw *mlx4_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) enum mlx4_net_trans_rule_id type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) switch (ib_spec->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case IB_FLOW_SPEC_ETH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) type = MLX4_NET_TRANS_RULE_ID_ETH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) case IB_FLOW_SPEC_IB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) type = MLX4_NET_TRANS_RULE_ID_IB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) mlx4_spec->ib.l3_qpn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) cpu_to_be32(qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) mlx4_spec->ib.qpn_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) case IB_FLOW_SPEC_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) type = MLX4_NET_TRANS_RULE_ID_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) case IB_FLOW_SPEC_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) case IB_FLOW_SPEC_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) type = ib_spec->type == IB_FLOW_SPEC_TCP ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) MLX4_NET_TRANS_RULE_ID_TCP :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) MLX4_NET_TRANS_RULE_ID_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) mlx4_hw_rule_sz(dev, type) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return mlx4_hw_rule_sz(dev, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct default_rules {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) __u8 link_layer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static const struct default_rules default_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) .mandatory_fields = {IB_FLOW_SPEC_IPV4},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) .rules_create_list = {IB_FLOW_SPEC_IB},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) .link_layer = IB_LINK_LAYER_INFINIBAND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
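/*
 * Match a flow against default_table: the link layer must agree, all
 * mandatory_fields must appear in the (sorted) specs, and none of the
 * mandatory_not_fields may appear. Returns the table index on a match
 * or -1 otherwise.
 */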
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) struct ib_flow_attr *flow_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int i, j, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) void *ib_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) const struct default_rules *pdefault_rules = default_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) memset(&field_types, 0, sizeof(field_types));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (link_layer != pdefault_rules->link_layer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ib_flow = flow_attr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /* we assume the specs are sorted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) j < flow_attr->num_of_specs; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) union ib_flow_spec *current_flow =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) (union ib_flow_spec *)ib_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* same layer but different type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) (pdefault_rules->mandatory_fields[k] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) IB_FLOW_SPEC_LAYER_MASK)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) (current_flow->type !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) pdefault_rules->mandatory_fields[k]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /* same layer, try match next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (current_flow->type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) pdefault_rules->mandatory_fields[k]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ib_flow +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) ((union ib_flow_spec *)ib_flow)->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ib_flow = flow_attr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) for (j = 0; j < flow_attr->num_of_specs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* same layer and same type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (((union ib_flow_spec *)ib_flow)->type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) pdefault_rules->mandatory_not_fields[k])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
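/*
 * Append the hardware segments listed in rules_create_list for a
 * matched default-rules entry; returns the total number of bytes
 * added to the rule buffer, or -EINVAL on a bad entry.
 */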
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static int __mlx4_ib_create_default_rules(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) struct mlx4_ib_dev *mdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) const struct default_rules *pdefault_rules,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct _rule_hw *mlx4_spec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) union ib_flow_spec ib_spec = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) switch (pdefault_rules->rules_create_list[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /* no rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) case IB_FLOW_SPEC_IB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) ib_spec.type = IB_FLOW_SPEC_IB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) ib_spec.size = sizeof(struct ib_flow_spec_ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* invalid rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		/* We must put an empty rule, the qpn is ignored */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) mlx4_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) pr_info("invalid parsing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) mlx4_spec = (void *)mlx4_spec + ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) size += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
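/*
 * Build a complete flow steering rule in a command mailbox - control
 * segment, any default rules for the link layer, then the caller's
 * specs - and attach it with QP_FLOW_STEERING_ATTACH. The firmware
 * hands back the rule's registration id through reg_id.
 */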
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) int domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) enum mlx4_net_trans_promisc_mode flow_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) u64 *reg_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) void *ib_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct mlx4_ib_dev *mdev = to_mdev(qp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct mlx4_cmd_mailbox *mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct mlx4_net_trans_rule_hw_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) int default_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) pr_err("Invalid priority value %d\n", flow_attr->priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (IS_ERR(mailbox))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return PTR_ERR(mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ctrl = mailbox->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ctrl->port = flow_attr->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) ctrl->qpn = cpu_to_be32(qp->qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ib_flow = flow_attr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* Add default flows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (default_flow >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) ret = __mlx4_ib_create_default_rules(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) mdev, qp, default_table + default_flow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) mailbox->buf + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) mlx4_free_cmd_mailbox(mdev->dev, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) size += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) for (i = 0; i < flow_attr->num_of_specs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) mailbox->buf + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) mlx4_free_cmd_mailbox(mdev->dev, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ib_flow += ((union ib_flow_spec *) ib_flow)->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) size += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
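	/*
	 * On a multi-function master, a regular rule whose only spec is an
	 * Ethernet header is handed to the core so it can adjust steering
	 * priority for multicast destination MACs (see
	 * mlx4_handle_eth_header_mcast_prio()).
	 */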
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) flow_attr->num_of_specs == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) enum ib_flow_spec_type header_spec =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ((union ib_flow_spec *)(flow_attr + 1))->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (header_spec == IB_FLOW_SPEC_ETH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
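	/* The attach command takes the rule size in dwords, hence size >> 2. */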
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Failed to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Failed to register network rule.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) mlx4_free_cmd_mailbox(mdev->dev, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
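/*
 * Detach a flow steering rule using the firmware registration id returned
 * when the rule was attached.
 */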
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) err = mlx4_cmd(dev, reg_id, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) MLX4_CMD_NATIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (err)
		pr_err("Failed to detach network rule, registration id = 0x%llx\n",
		       reg_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
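/*
 * When VXLAN tunnel offload is active (and DMFS is not in A0 static mode),
 * add a matching L2 tunnel steering rule for a rule that consists of a
 * single Ethernet spec; in any other case this is a no-op.
 */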
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u64 *reg_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) void *ib_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) union ib_flow_spec *ib_spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct mlx4_dev *dev = to_mdev(qp->device)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return 0; /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ib_flow = flow_attr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ib_spec = (union ib_flow_spec *)ib_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return 0; /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) flow_attr->port, qp->qp_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) reg_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
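/*
 * Map an IB_FLOW_ATTR_FLAGS_DONT_TRAP rule onto sniffer steering modes:
 * with no specs, or an all-zero dst_mac mask, both the MC and UC sniffers
 * are used; otherwise the mask must cover exactly the multicast bit and
 * the dst_mac value selects either the MC or the UC sniffer.
 */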
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) struct ib_flow_attr *flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) enum mlx4_net_trans_promisc_mode *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (flow_attr->num_of_specs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) type[0] = MLX4_FS_MC_SNIFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) type[1] = MLX4_FS_UC_SNIFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) union ib_flow_spec *ib_spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) ib_spec = (union ib_flow_spec *)(flow_attr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (ib_spec->type != IB_FLOW_SPEC_ETH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
		/* If the mask is all zeros, sniff both MC and UC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) type[0] = MLX4_FS_MC_SNIFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) type[1] = MLX4_FS_UC_SNIFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) ib_spec->eth.mask.dst_mac[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) ib_spec->eth.mask.dst_mac[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) ib_spec->eth.mask.dst_mac[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ib_spec->eth.mask.dst_mac[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ib_spec->eth.mask.dst_mac[5]};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
			/* The XOR above touched only the multicast bit, so a
			 * non-empty mask is valid only if that bit is set and
			 * all other mask bits are zero.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (!is_zero_ether_addr(&mac[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) type[0] = MLX4_FS_MC_SNIFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) type[0] = MLX4_FS_UC_SNIFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
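/*
 * uverbs entry point for flow creation: validate port and flags, map the
 * attribute type onto up to two promiscuous steering modes, attach a rule
 * per mode and, on a bonded device, a mirror rule on port 2.
 */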
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct ib_flow_attr *flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) int err = 0, i = 0, j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct mlx4_ib_flow *mflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) enum mlx4_net_trans_promisc_mode type[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) int is_bonded = mlx4_is_bonded(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) (flow_attr->type != IB_FLOW_ATTR_NORMAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (udata &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) memset(type, 0, sizeof(type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (!mflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) switch (flow_attr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) case IB_FLOW_ATTR_NORMAL:
		/* If the don't-trap flag (continue match) is set, traffic is
		 * replicated to the given QP under specific conditions,
		 * without being stolen from its original destination.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) err = mlx4_ib_add_dont_trap_rule(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) type[0] = MLX4_FS_REGULAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) case IB_FLOW_ATTR_ALL_DEFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) type[0] = MLX4_FS_ALL_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) case IB_FLOW_ATTR_MC_DEFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) type[0] = MLX4_FS_MC_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) case IB_FLOW_ATTR_SNIFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) type[0] = MLX4_FS_MIRROR_RX_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) type[1] = MLX4_FS_MIRROR_SX_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) while (i < ARRAY_SIZE(type) && type[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) type[i], &mflow->reg_id[i].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) goto err_create_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (is_bonded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /* Application always sees one port so the mirror rule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * must be on port #2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) flow_attr->port = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) err = __mlx4_ib_create_flow(qp, flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) MLX4_DOMAIN_UVERBS, type[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) &mflow->reg_id[j].mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) flow_attr->port = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) goto err_create_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) &mflow->reg_id[i].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) goto err_create_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (is_bonded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) flow_attr->port = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) &mflow->reg_id[j].mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) flow_attr->port = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) goto err_create_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return &mflow->ibflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
err_create_flow:
	/* Entries 0..i-1 and mirrors 0..j-1 were registered; the entry for
	 * the failed attach was never set, so decrement before destroying.
	 */
	while (i) {
		i--;
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
	}

	while (j) {
		j--;
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) kfree(mflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
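/* Detach every rule (and bonded mirror rule) recorded in this flow. */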
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct mlx4_ib_flow *mflow = to_mflow(flow_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (mflow->reg_id[i].mirror) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) err = __mlx4_ib_destroy_flow(mdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) mflow->reg_id[i].mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) kfree(mflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
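/*
 * Attach a QP to a multicast group. With device-managed steering the
 * registration id is stashed on mqp->steering_rules so detach can find it;
 * on a bonded device the group is attached on both ports.
 */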
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) struct mlx4_dev *dev = mdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct mlx4_ib_qp *mqp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct mlx4_ib_steering *ib_steering = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) struct mlx4_flow_reg_id reg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (mdev->dev->caps.steering_mode ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) MLX4_STEERING_MODE_DEVICE_MANAGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (!ib_steering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) !!(mqp->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) pr_err("multicast attach op failed, err %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) goto err_malloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) reg_id.mirror = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (mlx4_is_bonded(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) (mqp->port == 1) ? 2 : 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) !!(mqp->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) goto err_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) err = add_gid_entry(ibqp, gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) goto err_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (ib_steering) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) memcpy(ib_steering->gid.raw, gid->raw, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ib_steering->reg_id = reg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) mutex_lock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) list_add(&ib_steering->list, &mqp->steering_rules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) mutex_unlock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) err_add:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) prot, reg_id.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (reg_id.mirror)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) prot, reg_id.mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) err_malloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) kfree(ib_steering);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
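/* Linear search of the QP's gid_list by raw GID; caller holds mqp->mutex. */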
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) struct mlx4_ib_gid_entry *ge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct mlx4_ib_gid_entry *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct mlx4_ib_gid_entry *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (!memcmp(raw, ge->gid.raw, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) ret = ge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
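/*
 * Reverse of mlx4_ib_mcg_attach(): recover the registration id saved at
 * attach time (device-managed steering), detach on both ports of a bonded
 * device and drop the matching gid_list entry.
 */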
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) struct mlx4_dev *dev = mdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct mlx4_ib_qp *mqp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct mlx4_ib_gid_entry *ge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct mlx4_flow_reg_id reg_id = {0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (mdev->dev->caps.steering_mode ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) MLX4_STEERING_MODE_DEVICE_MANAGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct mlx4_ib_steering *ib_steering;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) mutex_lock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) list_del(&ib_steering->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) mutex_unlock(&mqp->mutex);
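		/* If the loop ran to completion, ib_steering points at the
		 * list head container, i.e. no matching rule was found.
		 */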
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (&ib_steering->list == &mqp->steering_rules) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) reg_id = ib_steering->reg_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) kfree(ib_steering);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) prot, reg_id.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (mlx4_is_bonded(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) prot, reg_id.mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) mutex_lock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) ge = find_gid_entry(mqp, gid->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (ge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) spin_lock_bh(&mdev->iboe.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) dev_hold(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) spin_unlock_bh(&mdev->iboe.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dev_put(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) list_del(&ge->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) kfree(ge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) pr_warn("could not find mgid entry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) mutex_unlock(&mqp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
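/*
 * Query the NodeDescription and NodeInfo attributes through MAD_IFC and
 * cache the node description, node GUID and hardware revision id.
 */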
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) static int init_node_data(struct mlx4_ib_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) struct ib_smp *in_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct ib_smp *out_mad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (!in_mad || !out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) init_query_mad(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) kfree(in_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) kfree(out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) static ssize_t hca_type_show(struct device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) struct mlx4_ib_dev *dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) static DEVICE_ATTR_RO(hca_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) static ssize_t hw_rev_show(struct device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) struct mlx4_ib_dev *dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) return sprintf(buf, "%x\n", dev->dev->rev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static DEVICE_ATTR_RO(hw_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static ssize_t board_id_show(struct device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct mlx4_ib_dev *dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) dev->dev->board_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static DEVICE_ATTR_RO(board_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static struct attribute *mlx4_class_attributes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) &dev_attr_hw_rev.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) &dev_attr_hca_type.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) &dev_attr_board_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) static const struct attribute_group mlx4_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) .attrs = mlx4_class_attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
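/*
 * Diagnostic counters exported via the rdma hw_stats interface; offset is
 * the counter's byte offset in the firmware's diagnostic counter output.
 */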
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) struct diag_counter {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) #define DIAG_COUNTER(_name, _offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) { .name = #_name, .offset = _offset }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static const struct diag_counter diag_basic[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) DIAG_COUNTER(rq_num_lle, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) DIAG_COUNTER(sq_num_lle, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) DIAG_COUNTER(rq_num_lqpoe, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) DIAG_COUNTER(sq_num_lqpoe, 0x0C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) DIAG_COUNTER(rq_num_lpe, 0x18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) DIAG_COUNTER(sq_num_lpe, 0x1C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) DIAG_COUNTER(rq_num_wrfe, 0x20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) DIAG_COUNTER(sq_num_wrfe, 0x24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) DIAG_COUNTER(sq_num_mwbe, 0x2C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) DIAG_COUNTER(sq_num_bre, 0x34),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) DIAG_COUNTER(sq_num_rire, 0x44),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) DIAG_COUNTER(rq_num_rire, 0x48),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) DIAG_COUNTER(sq_num_rae, 0x4C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) DIAG_COUNTER(rq_num_rae, 0x50),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) DIAG_COUNTER(sq_num_roe, 0x54),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) DIAG_COUNTER(sq_num_tree, 0x5C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) DIAG_COUNTER(sq_num_rree, 0x64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) DIAG_COUNTER(rq_num_rnr, 0x68),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) DIAG_COUNTER(sq_num_rnr, 0x6C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) DIAG_COUNTER(rq_num_oos, 0x100),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) DIAG_COUNTER(sq_num_oos, 0x104),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) static const struct diag_counter diag_ext[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) DIAG_COUNTER(rq_num_dup, 0x130),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) DIAG_COUNTER(sq_num_to, 0x134),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static const struct diag_counter diag_device_only[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) DIAG_COUNTER(num_cqovf, 0x1A0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) DIAG_COUNTER(rq_num_udsdprd, 0x118),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
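/* diag[0] holds the device-wide counter set, diag[1] the per-port set. */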
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) u8 port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct mlx4_ib_diag_counters *diag = dev->diag_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (!diag[!!port_num].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) diag[!!port_num].num_counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) RDMA_HW_STATS_DEFAULT_LIFESPAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
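/*
 * Fetch the transport/CI error counters from the firmware and copy them
 * into the rdma_hw_stats buffer.
 */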
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct rdma_hw_stats *stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) u8 port, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct mlx4_ib_dev *dev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) struct mlx4_ib_diag_counters *diag = dev->diag_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) u32 hw_value[ARRAY_SIZE(diag_device_only) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) ret = mlx4_query_diag_counters(dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) diag[!!port].offset, hw_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) diag[!!port].num_counters, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) for (i = 0; i < diag[!!port].num_counters; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) stats->value[i] = hw_value[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return diag[!!port].num_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) const char ***name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) u32 **offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) u32 *num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) bool port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) u32 num_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) num_counters = ARRAY_SIZE(diag_basic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) num_counters += ARRAY_SIZE(diag_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) num_counters += ARRAY_SIZE(diag_device_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (!*name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if (!*offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) goto err_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) *num = num_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) err_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) kfree(*name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) const char **name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) u32 *offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) bool port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) name[i] = diag_basic[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) offset[i] = diag_basic[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) name[j] = diag_ext[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) offset[j] = diag_ext[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) name[j] = diag_device_only[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) offset[j] = diag_device_only[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) .alloc_hw_stats = mlx4_ib_alloc_hw_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) .get_hw_stats = mlx4_ib_get_hw_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
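/*
 * Build the device-wide (diag[0]) and, when supported, per-port (diag[1])
 * counter tables and wire up the hw_stats ops. Slaves expose no counters.
 */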
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) bool per_port = !!(ibdev->dev->caps.flags2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (mlx4_is_slave(ibdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) /* i == 1 means we are building port counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (i && !per_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) &diag[i].offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) &diag[i].num_counters, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) diag[i].offset, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) kfree(diag[i - 1].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) kfree(diag[i - 1].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) kfree(ibdev->diag_counters[i].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) kfree(ibdev->diag_counters[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) #define MLX4_IB_INVALID_MAC ((u64)-1)
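/*
 * React to a source MAC change on the port's netdev: cache the new MAC
 * and, under SR-IOV, move the proxy QP1 onto the newly registered MAC.
 */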
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) u64 new_smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) u64 release_mac = MLX4_IB_INVALID_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct mlx4_ib_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) read_lock(&dev_base_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) new_smac = mlx4_mac_to_u64(dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) read_unlock(&dev_base_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
	/* No need to update QP1 or register a MAC in non-SRIOV mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (!mlx4_is_mfunc(ibdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) qp = ibdev->qp1_proxy[port - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) int new_smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) u64 old_smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) struct mlx4_update_qp_params update_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) mutex_lock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) old_smac = qp->pri.smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (new_smac == old_smac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (new_smac_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) update_params.smac_index = new_smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) &update_params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) release_mac = new_smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
		/* if the old port was zero, no MAC was registered for this QP yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if (qp->pri.smac_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) release_mac = old_smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) qp->pri.smac = new_smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) qp->pri.smac_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) qp->pri.smac_index = new_smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (release_mac != MLX4_IB_INVALID_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) mlx4_unregister_mac(ibdev->dev, port, release_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) mutex_unlock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
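/*
 * Called under RTNL for each netdev event: refresh the cached port
 * netdevs, emit PORT_ACTIVE/PORT_ERR events on link state transitions and
 * note which port needs its QP1 source MAC refreshed.
 */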
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) struct net_device *dev,
				 unsigned long event)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct mlx4_ib_iboe *iboe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) int update_qps_port = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) iboe = &ibdev->iboe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) spin_lock_bh(&iboe->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) mlx4_foreach_ib_transport_port(port, ibdev->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) iboe->netdevs[port - 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (dev == iboe->netdevs[port - 1] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) event == NETDEV_UP || event == NETDEV_CHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) update_qps_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (dev == iboe->netdevs[port - 1] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) (event == NETDEV_UP || event == NETDEV_DOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) enum ib_port_state port_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct ib_event ibev = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (ib_get_cached_port_state(&ibdev->ib_dev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) &port_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (event == NETDEV_UP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) (port_state != IB_PORT_ACTIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) iboe->last_port_state[port - 1] != IB_PORT_DOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (event == NETDEV_DOWN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) (port_state != IB_PORT_DOWN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) iboe->last_port_state[port - 1] = port_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ibev.device = &ibdev->ib_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) ibev.element.port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) IB_EVENT_PORT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) ib_dispatch_event(&ibev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) spin_unlock_bh(&iboe->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (update_qps_port > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) mlx4_ib_update_qps(ibdev, dev, update_qps_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
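/* Netdev notifier callback; events outside the initial netns are ignored. */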
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) static int mlx4_ib_netdev_event(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) unsigned long event, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct net_device *dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) struct mlx4_ib_dev *ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (!net_eq(dev_net(dev), &init_net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) mlx4_ib_scan_netdevs(ibdev, dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
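/*
 * Set up pkey paravirtualization on the master: the master function
 * gets the identity virt2phys pkey mapping, while every other slave
 * maps virtual index 0 to physical index 0 and all remaining indexes
 * to the last entry of the physical table.  Each mapping is synced to
 * the firmware, and the physical pkey cache is seeded with the default
 * pkey (0xFFFF) at index 0.
 */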
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) static void init_pkeys(struct mlx4_ib_dev *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) int slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (mlx4_is_master(ibdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) ++slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) for (i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /* master has the identity virt2phys pkey mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) /* initialize pkey cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) for (i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) ibdev->pkeys.phys_pkey_cache[port-1][i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) (i) ? 0 : 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
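/*
 * Try to assign a dedicated EQ to each completion vector, grouping the
 * EQs per port.  EQs shared with an earlier port are skipped, slots
 * that could not be assigned are marked with -1, and only the number
 * of successfully assigned EQs is advertised to clients.  If the table
 * itself cannot be allocated, the device keeps the default vectors.
 */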
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) int i, j, eq = 0, total_eqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) sizeof(ibdev->eq_table[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (!ibdev->eq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) for (i = 1; i <= dev->caps.num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) j++, total_eqs++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) ibdev->eq_table[eq] = total_eqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (!mlx4_assign_eq(dev, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) &ibdev->eq_table[eq]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) eq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) ibdev->eq_table[eq] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) for (i = eq; i < dev->caps.num_comp_vectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) ibdev->eq_table[i++] = -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) /* Advertise the new number of EQs to clients */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) ibdev->ib_dev.num_comp_vectors = eq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
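/*
 * Undo mlx4_ib_alloc_eqs(): reset the advertised number of completion
 * vectors, release every EQ in the table and free the table itself.
 */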
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) int total_eqs = ibdev->ib_dev.num_comp_vectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /* no eqs were allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (!ibdev->eq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) /* Reset the advertised EQ number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) ibdev->ib_dev.num_comp_vectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) for (i = 0; i < total_eqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) mlx4_release_eq(dev, ibdev->eq_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) kfree(ibdev->eq_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) ibdev->eq_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
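/*
 * Fill in the immutable port attributes: the core capability flags
 * according to the port's link layer (IB, or RoCE v1/v2 plus raw
 * packet), the maximum MAD size, and the pkey/gid table sizes as
 * reported by ib_query_port().
 */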
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) struct ib_port_immutable *immutable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) struct ib_port_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) struct mlx4_ib_dev *mdev = to_mdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) immutable->max_mad_size = IB_MGMT_MAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) immutable->max_mad_size = IB_MGMT_MAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) err = ib_query_port(ibdev, port_num, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) immutable->pkey_tbl_len = attr.pkey_tbl_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) immutable->gid_tbl_len = attr.gid_tbl_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
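/*
 * caps.fw_ver packs the firmware version as major (bits 63:32), minor
 * (bits 31:16) and sub-minor (bits 15:0); render it as
 * "major.minor.subminor".
 */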
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) static void get_fw_ver_str(struct ib_device *device, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct mlx4_ib_dev *dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) container_of(device, struct mlx4_ib_dev, ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) (int) (dev->dev->caps.fw_ver >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) (int) dev->dev->caps.fw_ver & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) static const struct ib_device_ops mlx4_ib_dev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) .driver_id = RDMA_DRIVER_MLX4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) .add_gid = mlx4_ib_add_gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) .alloc_mr = mlx4_ib_alloc_mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) .alloc_pd = mlx4_ib_alloc_pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) .alloc_ucontext = mlx4_ib_alloc_ucontext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) .attach_mcast = mlx4_ib_mcg_attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) .create_ah = mlx4_ib_create_ah,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) .create_cq = mlx4_ib_create_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) .create_qp = mlx4_ib_create_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) .create_srq = mlx4_ib_create_srq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) .dealloc_pd = mlx4_ib_dealloc_pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) .del_gid = mlx4_ib_del_gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) .dereg_mr = mlx4_ib_dereg_mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) .destroy_ah = mlx4_ib_destroy_ah,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) .destroy_cq = mlx4_ib_destroy_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) .destroy_qp = mlx4_ib_destroy_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) .destroy_srq = mlx4_ib_destroy_srq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) .detach_mcast = mlx4_ib_mcg_detach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) .drain_rq = mlx4_ib_drain_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) .drain_sq = mlx4_ib_drain_sq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) .get_dev_fw_str = get_fw_ver_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) .get_dma_mr = mlx4_ib_get_dma_mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) .get_link_layer = mlx4_ib_port_link_layer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) .get_netdev = mlx4_ib_get_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) .get_port_immutable = mlx4_port_immutable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) .map_mr_sg = mlx4_ib_map_mr_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) .mmap = mlx4_ib_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) .modify_cq = mlx4_ib_modify_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) .modify_device = mlx4_ib_modify_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) .modify_port = mlx4_ib_modify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) .modify_qp = mlx4_ib_modify_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) .modify_srq = mlx4_ib_modify_srq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) .poll_cq = mlx4_ib_poll_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) .post_recv = mlx4_ib_post_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) .post_send = mlx4_ib_post_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) .post_srq_recv = mlx4_ib_post_srq_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) .process_mad = mlx4_ib_process_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) .query_ah = mlx4_ib_query_ah,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) .query_device = mlx4_ib_query_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) .query_gid = mlx4_ib_query_gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) .query_pkey = mlx4_ib_query_pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) .query_port = mlx4_ib_query_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) .query_qp = mlx4_ib_query_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) .query_srq = mlx4_ib_query_srq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) .reg_user_mr = mlx4_ib_reg_user_mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) .req_notify_cq = mlx4_ib_arm_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) .rereg_user_mr = mlx4_ib_rereg_user_mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) .resize_cq = mlx4_ib_resize_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) .create_wq = mlx4_ib_create_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) .destroy_wq = mlx4_ib_destroy_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) .modify_wq = mlx4_ib_modify_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) ib_rwq_ind_tbl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) .alloc_mw = mlx4_ib_alloc_mw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) .dealloc_mw = mlx4_ib_dealloc_mw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) .alloc_xrcd = mlx4_ib_alloc_xrcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) .create_flow = mlx4_ib_create_flow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) .destroy_flow = mlx4_ib_destroy_flow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
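/*
 * Probe entry point, called by the mlx4 core for each device.  Counts
 * the ports running an IB transport, allocates and populates the
 * ib_device, sets up the PD, UAR, EQs, per-port counters and the
 * steering QPN range, registers with the RDMA core, and finally brings
 * up the MAD layer, SR-IOV support and the netdevice notifier.
 * Returns the new device, or NULL after unwinding in reverse order on
 * any failure.
 */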
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) static void *mlx4_ib_add(struct mlx4_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) struct mlx4_ib_dev *ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) int num_ports = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) struct mlx4_ib_iboe *iboe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) int ib_num_ports = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) int num_req_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) int allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) u32 counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct counter_index *new_counter_index = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) pr_info_once("%s", mlx4_ib_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) num_ports = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) mlx4_foreach_ib_transport_port(i, dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) num_ports++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) /* No point in registering a device with no ports... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) if (num_ports == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (!ibdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) dev_err(&dev->persist->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) "Device struct alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) iboe = &ibdev->iboe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) goto err_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) goto err_pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (!ibdev->uar_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) goto err_uar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) ibdev->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) ibdev->bond_next_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) ibdev->num_ports = num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 1 : ibdev->num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) ibdev->ib_dev.uverbs_cmd_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) (1ull << IB_USER_VERBS_CMD_REG_MR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) (1ull << IB_USER_VERBS_CMD_REREG_MR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) (1ull << IB_USER_VERBS_CMD_OPEN_QP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) ibdev->ib_dev.uverbs_ex_cmd_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) IB_LINK_LAYER_ETHERNET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) IB_LINK_LAYER_ETHERNET))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) ibdev->ib_dev.uverbs_ex_cmd_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) ibdev->ib_dev.uverbs_cmd_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) ibdev->ib_dev.uverbs_cmd_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (check_flow_steering_support(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) ibdev->ib_dev.uverbs_ex_cmd_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (!dev->caps.userspace_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) ibdev->ib_dev.ops.uverbs_abi_ver =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) mlx4_ib_alloc_eqs(dev, ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) spin_lock_init(&iboe->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) if (init_node_data(ibdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) goto err_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) mlx4_init_sl2vl_tbl(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) for (i = 0; i < ibdev->num_ports; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) mutex_init(&ibdev->counters_table[i].mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) iboe->last_port_state[i] = IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) for (i = 0; i < num_req_counters; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) mutex_init(&ibdev->qp1_proxy_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) IB_LINK_LAYER_ETHERNET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) err = mlx4_counter_alloc(ibdev->dev, &counter_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) MLX4_RES_USAGE_DRIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /* if failed to allocate a new counter, use default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) counter_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) mlx4_get_default_counter_index(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) allocated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) counter_index = mlx4_get_default_counter_index(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) new_counter_index = kmalloc(sizeof(*new_counter_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) if (!new_counter_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) mlx4_counter_free(ibdev->dev, counter_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) goto err_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) new_counter_index->index = counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) new_counter_index->allocated = allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) list_add_tail(&new_counter_index->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) &ibdev->counters_table[i].counters_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) ibdev->counters_table[i].default_counter = counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) pr_info("counter index %d for port %d allocated %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) counter_index, i + 1, allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (mlx4_is_bonded(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) for (i = 1; i < ibdev->num_ports ; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) new_counter_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) kmalloc(sizeof(struct counter_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) if (!new_counter_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) goto err_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) new_counter_index->index = counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) new_counter_index->allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) list_add_tail(&new_counter_index->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) &ibdev->counters_table[i].counters_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) ibdev->counters_table[i].default_counter =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) ib_num_ports++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) spin_lock_init(&ibdev->sm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) mutex_init(&ibdev->cap_mask_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) INIT_LIST_HEAD(&ibdev->qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) spin_lock_init(&ibdev->reset_flow_resource_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) ib_num_ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) MLX4_IB_UC_STEER_QPN_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) &ibdev->steer_qpn_base, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) MLX4_RES_USAGE_DRIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) goto err_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) ibdev->ib_uc_qpns_bitmap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (!ibdev->ib_uc_qpns_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) goto err_steer_qp_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) bitmap_zero(ibdev->ib_uc_qpns_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) ibdev->steer_qpn_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) dev, ibdev->steer_qpn_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) ibdev->steer_qpn_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) ibdev->steer_qpn_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) goto err_steer_free_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) bitmap_fill(ibdev->ib_uc_qpns_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) ibdev->steer_qpn_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) if (mlx4_ib_alloc_diag_counters(ibdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) goto err_steer_free_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) &dev->persist->pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) goto err_diag_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (mlx4_ib_mad_init(ibdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) goto err_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (mlx4_ib_init_sriov(ibdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) goto err_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (!iboe->nb.notifier_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) iboe->nb.notifier_call = mlx4_ib_netdev_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) err = register_netdevice_notifier(&iboe->nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) iboe->nb.notifier_call = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) goto err_notif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) goto err_notif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) ibdev->ib_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) &ibdev->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (mlx4_is_mfunc(ibdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) init_pkeys(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) /* create paravirt contexts for any VFs which are active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (mlx4_is_master(ibdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) for (j = 0; j < MLX4_MFUNC_MAX; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) if (j == mlx4_master_func_num(ibdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (mlx4_is_slave_active(ibdev->dev, j))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) do_slave_init(ibdev, j, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) return ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) err_notif:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (ibdev->iboe.nb.notifier_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (unregister_netdevice_notifier(&ibdev->iboe.nb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) pr_warn("failure unregistering notifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) ibdev->iboe.nb.notifier_call = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) flush_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) mlx4_ib_close_sriov(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) err_mad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) mlx4_ib_mad_cleanup(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) err_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) ib_unregister_device(&ibdev->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) err_diag_counters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) mlx4_ib_diag_cleanup(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) err_steer_free_bitmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) kfree(ibdev->ib_uc_qpns_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) err_steer_qp_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) ibdev->steer_qpn_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) err_counter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) for (i = 0; i < ibdev->num_ports; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) err_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) mlx4_ib_free_eqs(dev, ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) iounmap(ibdev->uar_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) err_uar:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) mlx4_uar_free(dev, &ibdev->priv_uar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) err_pd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) mlx4_pd_free(dev, ibdev->priv_pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) err_dealloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) ib_dealloc_device(&ibdev->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
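/*
 * Reserve a power-of-two-sized, naturally aligned region of the
 * steering QPN range that covers @count QPNs.  Returns 0 and the first
 * QPN through @qpn, or a negative error if no free region is left.
 * A minimal usage sketch:
 *
 *	int qpn;
 *
 *	if (!mlx4_ib_steer_qp_alloc(dev, 1, &qpn)) {
 *		... use qpn ...
 *		mlx4_ib_steer_qp_free(dev, qpn, 1);
 *	}
 */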
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) WARN_ON(!dev->ib_uc_qpns_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) dev->steer_qpn_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) get_count_order(count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) if (offset < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) *qpn = dev->steer_qpn_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
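/*
 * Return a QPN region to the steering bitmap.  A zero QPN or a device
 * that is not in device-managed steering mode makes this a no-op, and
 * a QPN below the steering base only triggers a warning.
 */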
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (!qpn ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) qpn, dev->steer_qpn_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) /* not supposed to be here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) bitmap_release_region(dev->ib_uc_qpns_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) qpn - dev->steer_qpn_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) get_count_order(count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
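/*
 * Attach or detach the catch-all steering rule for @mqp: on attach,
 * create a single-spec flow in the NIC domain whose all-zero IB L2
 * mask matches any packet on the QP's port, keeping the registration
 * id in the QP; on detach, destroy that flow again.
 */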
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) int is_attach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) size_t flow_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) struct ib_flow_attr *flow = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) struct ib_flow_spec_ib *ib_spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (is_attach) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) flow_size = sizeof(struct ib_flow_attr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) sizeof(struct ib_flow_spec_ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) flow = kzalloc(flow_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (!flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) flow->port = mqp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) flow->num_of_specs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) flow->size = flow_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) ib_spec->type = IB_FLOW_SPEC_IB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) ib_spec->size = sizeof(struct ib_flow_spec_ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) /* Add an empty rule for IB L2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) MLX4_FS_REGULAR, &mqp->reg_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) kfree(flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
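/*
 * Device removal hook: tear everything down in the reverse order of
 * mlx4_ib_add() - devlink port types, the netdevice notifier, SR-IOV
 * and MAD support, the RDMA core registration, and all resources
 * allocated at probe time.
 */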
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) struct mlx4_ib_dev *ibdev = ibdev_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) int p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) ibdev->ib_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) flush_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) if (ibdev->iboe.nb.notifier_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (unregister_netdevice_notifier(&ibdev->iboe.nb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) pr_warn("failure unregistering notifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) ibdev->iboe.nb.notifier_call = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) mlx4_ib_close_sriov(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) mlx4_ib_mad_cleanup(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) ib_unregister_device(&ibdev->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) mlx4_ib_diag_cleanup(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ibdev->steer_qpn_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) kfree(ibdev->ib_uc_qpns_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) iounmap(ibdev->uar_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) for (p = 0; p < ibdev->num_ports; ++p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) mlx4_CLOSE_PORT(dev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) mlx4_ib_free_eqs(dev, ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) mlx4_uar_free(dev, &ibdev->priv_uar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) mlx4_pd_free(dev, ibdev->priv_pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) ib_dealloc_device(&ibdev->ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
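/*
 * On the master, queue one work item per active port of @slave to set
 * up (do_init = 1) or tear down its paravirtualized tunnel QPs.  The
 * work items are freed instead of queued if SR-IOV teardown is already
 * in progress.
 */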
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) struct mlx4_ib_demux_work **dm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) struct mlx4_dev *dev = ibdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) struct mlx4_active_ports actv_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) unsigned int ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) unsigned int first_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) if (!mlx4_is_master(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) actv_ports = mlx4_get_active_ports(dev, slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) if (!dm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) for (i = 0; i < ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) if (!dm[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) kfree(dm[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) dm[i]->port = first_port + i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) dm[i]->slave = slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) dm[i]->do_init = do_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) dm[i]->dev = ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) /* initialize or tear down tunnel QPs for the slave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (!ibdev->sriov.is_going_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) for (i = 0; i < ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) for (i = 0; i < ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) kfree(dm[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) kfree(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
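/*
 * Handle a catastrophic device error: walk every QP on the device and,
 * for each send or receive queue with outstanding work requests,
 * collect its CQ (once) on a notify list.  The completion handlers of
 * the collected CQs are then invoked so their owners poll the flushed
 * completions.
 */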
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) struct mlx4_ib_qp *mqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) unsigned long flags_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) unsigned long flags_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) struct mlx4_ib_cq *send_mcq, *recv_mcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) struct list_head cq_notify_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) struct mlx4_cq *mcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) pr_warn("mlx4_ib_handle_catas_error was started\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) INIT_LIST_HEAD(&cq_notify_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) spin_lock_irqsave(&mqp->sq.lock, flags_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (mqp->sq.tail != mqp->sq.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) send_mcq = to_mcq(mqp->ibqp.send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) spin_lock_irqsave(&send_mcq->lock, flags_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (send_mcq->mcq.comp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) mqp->ibqp.send_cq->comp_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) if (!send_mcq->mcq.reset_notify_added) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) send_mcq->mcq.reset_notify_added = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) list_add_tail(&send_mcq->mcq.reset_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) &cq_notify_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) /* Now, handle the QP's receive queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) spin_lock_irqsave(&mqp->rq.lock, flags_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) /* no handling is needed for SRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (!mqp->ibqp.srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) if (mqp->rq.tail != mqp->rq.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) recv_mcq = to_mcq(mqp->ibqp.recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) spin_lock_irqsave(&recv_mcq->lock, flags_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (recv_mcq->mcq.comp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) mqp->ibqp.recv_cq->comp_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) if (!recv_mcq->mcq.reset_notify_added) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) recv_mcq->mcq.reset_notify_added = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) list_add_tail(&recv_mcq->mcq.reset_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) &cq_notify_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) spin_unlock_irqrestore(&recv_mcq->lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) flags_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) mcq->comp(mcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) pr_warn("mlx4_ib_handle_catas_error ended\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
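/*
 * For a bonded device the logical port state is the best state of the
 * slave netdevs: ACTIVE if any of them is running with carrier, DOWN
 * otherwise.  Dispatch the resulting PORT_ACTIVE/PORT_ERR event on
 * logical port 1.
 */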
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	/* The work item is only needed to recover the device pointer */
	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		/* Once any slave port is ACTIVE, the bond stays ACTIVE */
		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}

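/*
 * Refresh the cached SL-to-VL mapping for @port from the device and
 * publish it atomically; on a query failure, fall back to an all-zero
 * mapping. Readers would fetch the cache with, e.g.,
 * atomic64_read(&mdev->sl2vl[port - 1]).
 */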
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current SL-to-VL mapping for port %d. Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}

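/* Deferred-work wrapper around mlx4_ib_sl2vl_update(); frees its work item */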
static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);

	kfree(ew);
}

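/*
 * Schedule an SL-to-VL cache refresh on the driver workqueue. Callers may
 * be in atomic context, hence GFP_ATOMIC; if the allocation fails, the
 * update is silently dropped and the stale mapping stays in use until a
 * later event schedules another refresh.
 */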
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	}
}

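/*
 * Main event demultiplexer called by the mlx4 core driver. @param carries
 * a port number for port events, a slave ID for slave init/shutdown, or a
 * pointer to the raw EQE for port-management-change events. Bonded-port
 * state changes and SL-to-VL updates are deferred to the ordered
 * workqueue, port-management-change events are deferred only on the
 * master; the remaining events are translated into an ib_event and
 * dispatched synchronously.
 */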
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			/* secure host without the HW change event: re-query SL-to-VL in SW */
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i, 1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i, 0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}

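/*
 * Registration descriptor handed to the mlx4 core: entry points for
 * adding/removing the IB device and for event delivery. MLX4_INTFF_BONDING
 * marks this consumer as capable of operating while the two physical
 * ports are bonded into one device.
 */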
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};

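/*
 * Module init: create the ordered workqueue used for all deferred event
 * handling (WQ_MEM_RECLAIM so the queue keeps a rescuer and can make
 * progress under memory pressure), bring up the multicast-group
 * machinery, then register with the mlx4 core. Unwinds in reverse order
 * on failure.
 */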
static int __init mlx4_ib_init(void)
{
	int err;

	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

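/* Module exit: tear down in the reverse order of mlx4_ib_init() */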
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);