/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
			       struct mlx4_ib_cq *recv_cq);
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
			      struct ib_udata *udata);

enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};

#ifndef ETH_ALEN
#define ETH_ALEN	6
#endif

static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};

enum mlx4_ib_source_type {
	MLX4_IB_QP_SRC	= 0,
	MLX4_IB_RWQ_SRC	= 1,
};

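/*
 * A tunnel QP is only meaningful when this function is the multi-function
 * master: return nonzero when the QPN falls inside the tunnel SQP range
 * (base_tunnel_sqpn .. base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX - 1).
 */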
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
		8 * MLX4_MFUNC_MAX;
}

static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;
	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
			    qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	if (proxy_sqp)
		return 1;

	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
}

/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;
	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}

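/*
 * WQE address helpers: the receive and send queues live in the same QP
 * buffer at rq.offset and sq.offset respectively, and each WQE occupies
 * 1 << wqe_shift bytes, so entry n is located with a shift and an add.
 */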
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}

/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with 0xffffffff, except for
 * the very first chunk of the WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	__be32 *wqe;
	int i;
	int s;
	void *buf;
	struct mlx4_wqe_ctrl_seg *ctrl;

	buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
	s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
	for (i = 64; i < s; i += 64) {
		wqe = buf + i;
		*wqe = cpu_to_be32(0xffffffff);
	}
}

static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on QP %06x\n",
				type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
			    type, qp->qpn);
}

static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}

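/*
 * Compute the receive queue geometry.  WQE count and scatter/gather
 * entries are rounded up to powers of two (QPs without an RQ get zero
 * for both), the WQE stride is sized to hold either the gather list or
 * the requested inline receive data, and the resulting limits are
 * reported back through @cap.
 */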
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
		       u32 inl_recv_sz)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr || inl_recv_sz)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
		u32 wqe_size;

		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
				inl_recv_sz > max_inl_recv_sz))
			return -EINVAL;

		qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
		qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}

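/*
 * Compute the send queue geometry for a kernel QP: validate the
 * requested capabilities against the device limits, derive the WQE
 * stride from the largest segment layout a WQE may need, reserve the
 * headroom WQEs required by the hardware, and lay out the RQ and SQ
 * inside the QP buffer (larger-stride queue first).
 */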
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	/*
	 * We need to leave 2 KB + 1 WR of headroom in the SQ to
	 * allow HW to prefetch.
	 */
	qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
					    qp->sq_spare_wqes);

	qp->sq.max_gs =
		(min(dev->dev->caps.max_sq_desc_sz,
		     (1 << qp->sq.wqe_shift)) -
		 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr = qp->sq.max_post =
		qp->sq.wqe_cnt - qp->sq_spare_wqes;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

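/*
 * For userspace QPs the send queue geometry is chosen by the userspace
 * library and passed in through the create command; only sanity-check
 * it against the device limits before adopting it.
 */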
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}

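/*
 * Proxy special QPs need one extra buffer per receive WQE to hold the
 * tunneled SQP header; allocate and DMA-map those buffers here, and
 * unwind everything if any allocation or mapping fails.
 */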
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
			      GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
			kfree(qp->sqp_proxy_rcv[i].addr);
			goto err;
		}
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}

static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}

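/* XRC QPs have no receive queue, and neither do QPs attached to an SRQ. */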
static bool qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return false;

	return !attr->srq;
}

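/*
 * Returns nonzero if @qpn is the proxy QP0 of a port for which a QP0
 * qkey has been provisioned, i.e. QP0 traffic is enabled for this
 * function on that port.
 */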
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy)
			return !!dev->caps.spec_qps[i].qp0_qkey;
	}
	return 0;
}

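/*
 * Return the QP's hardware counter to the device, drop it from the
 * per-port counters list and free the bookkeeping structure.
 */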
static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp)
{
	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	mlx4_counter_free(dev->dev, qp->counter_index->index);
	list_del(&qp->counter_index->list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	kfree(qp->counter_index);
	qp->counter_index = NULL;
}

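/*
 * Translate the RSS parameters supplied by userspace into the mlx4 RSS
 * context: indirection table base/size, Toeplitz hash key and the set
 * of header fields to hash on, rejecting any combination the device
 * cannot support.
 */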
static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
		      struct ib_qp_init_attr *init_attr,
		      struct mlx4_ib_create_qp_rss *ucmd)
{
	rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
		(init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);

	if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
	    (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
		memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		pr_debug("RX Hash function is not supported\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
					  MLX4_IB_RX_HASH_DST_IPV4 |
					  MLX4_IB_RX_HASH_SRC_IPV6 |
					  MLX4_IB_RX_HASH_DST_IPV6 |
					  MLX4_IB_RX_HASH_SRC_PORT_TCP |
					  MLX4_IB_RX_HASH_DST_PORT_TCP |
					  MLX4_IB_RX_HASH_SRC_PORT_UDP |
					  MLX4_IB_RX_HASH_DST_PORT_UDP |
					  MLX4_IB_RX_HASH_INNER)) {
		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
			 ucmd->rx_hash_fields_mask);
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		rss_ctx->flags = MLX4_RSS_IPV4;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		rss_ctx->flags |= MLX4_RSS_IPV6;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
			pr_debug("RX Hash fields_mask for UDP is not supported\n");
			return (-EOPNOTSUPP);
		}

		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
		if (dev->dev->caps.tunnel_offload_mode ==
		    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/*
			 * Hash according to inner headers if they exist,
			 * otherwise according to outer headers.
			 */
			rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
		} else {
			pr_debug("RSS Hash for inner headers isn't supported\n");
			return (-EOPNOTSUPP);
		}
	}

	return 0;
}

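/*
 * Allocate the hardware resources for an RSS QP: reserve and allocate a
 * QPN, set up minimal dummy send-queue state (an RSS QP never posts
 * sends), borrow the MTT of the first WQ in the indirection table and
 * fill in the RSS context.
 */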
static int create_qp_rss(struct mlx4_ib_dev *dev,
			 struct ib_qp_init_attr *init_attr,
			 struct mlx4_ib_create_qp_rss *ucmd,
			 struct mlx4_ib_qp *qp)
{
	int qpn;
	int err;

	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;

	err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
	if (err)
		return err;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
	qp->state = IB_QPS_RESET;

	/* Set dummy send resources to be compatible with HV and PRM */
	qp->sq_no_prefetch = 1;
	qp->sq.wqe_cnt = 1;
	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
	qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
	qp->mtt = (to_mqp(
		   (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;

	qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
	if (!qp->rss_ctx) {
		err = -ENOMEM;
		goto err_qp_alloc;
	}

	err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
	if (err)
		goto err;

	return 0;

err:
	kfree(qp->rss_ctx);

err_qp_alloc:
	mlx4_qp_remove(dev->dev, &qp->mqp);
	mlx4_qp_free(dev->dev, &qp->mqp);

err_qpn:
	mlx4_qp_release_range(dev->dev, qpn, 1);
	return err;
}

static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
				  struct ib_qp_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;
	int err;

	if (!udata) {
		pr_debug("RSS QP with NULL udata\n");
		return -EINVAL;
	}

	if (udata->outlen)
		return -EOPNOTSUPP;

	required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
			  sizeof(ucmd.reserved1);
	if (udata->inlen < required_cmd_sz) {
		pr_debug("invalid inlen\n");
		return -EINVAL;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		pr_debug("copy failed\n");
		return -EFAULT;
	}

	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
		return -EOPNOTSUPP;

	if (ucmd.comp_mask || ucmd.reserved1)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
		pr_debug("RSS QP with unsupported QP type %d\n",
			 init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		pr_debug("RSS QP doesn't support create flags\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->send_cq || init_attr->cap.max_send_wr) {
		pr_debug("RSS QP with unsupported send attributes\n");
		return -EOPNOTSUPP;
	}

	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
	if (err)
		return err;

	qp->ibqp.qp_num = qp->mqp.qpn;
	return 0;
}

/*
 * This function allocates a WQN from a range which is consecutive and aligned
 * to its size. If the current range is full, it creates a new range and
 * allocates the WQN from it. The new range will be used for subsequent
 * allocations.
 */
static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
			     struct mlx4_ib_qp *qp, int range_size, int *wqn)
{
	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
	struct mlx4_wqn_range *range;
	int err = 0;

	mutex_lock(&context->wqn_ranges_mutex);

	range = list_first_entry_or_null(&context->wqn_ranges_list,
					 struct mlx4_wqn_range, list);

	if (!range || (range->refcount == range->size) || range->dirty) {
		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			err = -ENOMEM;
			goto out;
		}

		err = mlx4_qp_reserve_range(dev->dev, range_size,
					    range_size, &range->base_wqn, 0,
					    qp->mqp.usage);
		if (err) {
			kfree(range);
			goto out;
		}

		range->size = range_size;
		list_add(&range->list, &context->wqn_ranges_list);
	} else if (range_size != 1) {
		/*
		 * Requesting a new range (>1) while the last range is
		 * still open is not valid.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) qp->wqn_range = range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) *wqn = range->base_wqn + range->refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) range->refcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) mutex_unlock(&context->wqn_ranges_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
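/*
 * Drop one reference on the WQN range this QP was allocated from. When the
 * last reference goes away the range is returned to the device and freed;
 * otherwise, if dirty_release is set, the range is marked dirty so it will
 * not be used for further allocations.
 */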
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct mlx4_ib_qp *qp, bool dirty_release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct mlx4_wqn_range *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) mutex_lock(&context->wqn_ranges_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) range = qp->wqn_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) range->refcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (!range->refcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) mlx4_qp_release_range(dev->dev, range->base_wqn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) range->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) list_del(&range->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) kfree(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) } else if (dirty_release) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * A range in which one of the WQNs has been destroyed cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * reused for further WQN allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * The next WQ to be created will allocate a new range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) range->dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mutex_unlock(&context->wqn_ranges_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
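/*
 * Create the receive side of a user-space WQ (RAW_PACKET): copy and validate
 * the create command from udata, size the RQ, pin the user buffer and build
 * its MTT, map the user doorbell, allocate a WQN from the context's WQN
 * ranges and the underlying mlx4 QP, and link the QP into the device and CQ
 * lists used by the reset flow.
 */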
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct ib_udata *udata, struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct mlx4_ib_dev *dev = to_mdev(pd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) int qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) udata, struct mlx4_ib_ucontext, ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct mlx4_ib_cq *mcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int range_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct mlx4_ib_create_wq wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) size_t copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) spin_lock_init(&qp->sq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) spin_lock_init(&qp->rq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) INIT_LIST_HEAD(&qp->gid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) INIT_LIST_HEAD(&qp->steering_rules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) qp->state = IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (ib_copy_from_udata(&wq, udata, copy_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) wq.reserved[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) pr_debug("user command isn't supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) pr_debug("WQN range size must be equal to or smaller than %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dev->dev->caps.max_rss_tbl_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) range_size = 1 << wq.log_range_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) qp->flags |= MLX4_IB_QP_SCATTER_FCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) qp->sq_no_prefetch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) qp->sq.wqe_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) (qp->sq.wqe_cnt << qp->sq.wqe_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (IS_ERR(qp->umem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) err = PTR_ERR(qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) goto err_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) goto err_mtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto err_mtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) goto err_wrid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) goto err_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Hardware wants QPN written in big-endian order (after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * shifting) for send doorbell. Precompute this value to save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * a little bit when posting sends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) qp->mqp.event = mlx4_ib_wq_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) to_mcq(init_attr->recv_cq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Keep the QP on the device's QP list; needed for further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * handling via the reset flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) list_add_tail(&qp->qps_list, &dev->qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Keep the QP on the CQs' QP lists; needed for further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * handling via the reset flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) mcq = to_mcq(init_attr->send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) mcq = to_mcq(init_attr->recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) to_mcq(init_attr->recv_cq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) err_qpn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) mlx4_ib_release_wqn(context, qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) err_wrid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) mlx4_ib_db_unmap_user(context, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) err_mtt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mlx4_mtt_cleanup(dev->dev, &qp->mtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) err_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ib_umem_release(qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
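/*
 * Common QP creation path for both kernel and user QPs. For special and
 * proxy/tunnel QPs the mlx4-specific QP type is derived from sqpn and the
 * create flags, and an sqp context is allocated. The buffers, MTT and
 * doorbell come either from user memory (udata) or from kernel allocations,
 * after which a QPN is reserved (or a steerable QP obtained for NETIF), the
 * mlx4 QP is allocated and the QP is linked into the reset-flow lists.
 */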
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct ib_udata *udata, int sqpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct mlx4_ib_dev *dev = to_mdev(pd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) udata, struct mlx4_ib_ucontext, ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct mlx4_ib_cq *mcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* When tunneling special qps, we use a plain UD qp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (sqpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (mlx4_is_mfunc(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) (!mlx4_is_master(dev->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (init_attr->qp_type == IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) qp_type = MLX4_IB_QPT_PROXY_GSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (mlx4_is_master(dev->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) qp0_enabled_vf(dev->dev, sqpn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) qp_type = MLX4_IB_QPT_PROXY_SMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) qpn = sqpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* add extra sg entry for tunneling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) init_attr->cap.max_recv_sge++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) container_of(init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct mlx4_ib_qp_tunnel_init_attr, init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) tnl_init->proxy_qp_type != IB_QPT_GSI) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) !mlx4_is_master(dev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (tnl_init->proxy_qp_type == IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) qp_type = MLX4_IB_QPT_TUN_GSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) tnl_init->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) qp_type = MLX4_IB_QPT_TUN_SMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* We are definitely in the PPF here, since we are creating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * tunnel QPs; base_tunnel_sqpn is therefore valid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) sqpn = qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (init_attr->qp_type == IB_QPT_SMI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) qp_type == MLX4_IB_QPT_GSI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) qp->sqp = kzalloc(sizeof(*qp->sqp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!qp->sqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) qp->mlx4_ib_qp_type = qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) spin_lock_init(&qp->sq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_lock_init(&qp->rq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) INIT_LIST_HEAD(&qp->gid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) INIT_LIST_HEAD(&qp->steering_rules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) qp->state = IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct mlx4_ib_create_qp ucmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) size_t copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) copy_len = sizeof(struct mlx4_ib_create_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) qp->inl_recv_sz = ucmd.inl_recv_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (!(dev->dev->caps.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) pr_debug("scatter FCS is unsupported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) qp->flags |= MLX4_IB_QP_SCATTER_FCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err = set_rq_size(dev, &init_attr->cap, udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) qp_has_rq(init_attr), qp, qp->inl_recv_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) qp->sq_no_prefetch = ucmd.sq_no_prefetch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) err = set_user_sq_size(dev, qp, &ucmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) qp->umem =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (IS_ERR(qp->umem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) err = PTR_ERR(qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto err_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) goto err_mtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (qp_has_rq(init_attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) goto err_mtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) err = set_rq_size(dev, &init_attr->cap, udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) qp_has_rq(init_attr), qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) qp->sq_no_prefetch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) qp->flags |= MLX4_IB_QP_LSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (dev->steering_support ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) MLX4_STEERING_MODE_DEVICE_MANAGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) qp->flags |= MLX4_IB_QP_NETIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (qp_has_rq(init_attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) err = mlx4_db_alloc(dev->dev, &qp->db, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) *qp->db.db = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) &qp->buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) goto err_db;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) &qp->mtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) goto err_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) goto err_mtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) sizeof(u64), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) sizeof(u64), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (!qp->sq.wrid || !qp->rq.wrid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) goto err_wrid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (sqpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (alloc_proxy_bufs(pd->device, qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) goto err_wrid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* Raw packet QPNs may not have bits 6 and 7 set in their qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * otherwise, the WQE BlueFlame setup flow wrongly causes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * VLAN insertion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (init_attr->qp_type == IB_QPT_RAW_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) (init_attr->cap.max_send_wr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) MLX4_RESERVE_ETH_BF_QP : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) (init_attr->cap.max_recv_wr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) MLX4_RESERVE_A0_QP : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) qp->mqp.usage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (qp->flags & MLX4_IB_QP_NETIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) err = mlx4_qp_reserve_range(dev->dev, 1, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) &qpn, 0, qp->mqp.usage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) goto err_proxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto err_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (init_attr->qp_type == IB_QPT_XRC_TGT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) qp->mqp.qpn |= (1 << 23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * Hardware wants QPN written in big-endian order (after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * shifting) for send doorbell. Precompute this value to save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * a little bit when posting sends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) qp->mqp.event = mlx4_ib_qp_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) to_mcq(init_attr->recv_cq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* Keep the QP on the device's QP list; needed for further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * handling via the reset flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) list_add_tail(&qp->qps_list, &dev->qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Keep the QP on the CQs' QP lists; needed for further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * handling via the reset flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) mcq = to_mcq(init_attr->send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) mcq = to_mcq(init_attr->recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) to_mcq(init_attr->recv_cq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) err_qpn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!sqpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (qp->flags & MLX4_IB_QP_NETIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) mlx4_ib_steer_qp_free(dev, qpn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) mlx4_qp_release_range(dev->dev, qpn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) err_proxy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) free_proxy_bufs(pd->device, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) err_wrid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (qp_has_rq(init_attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) mlx4_ib_db_unmap_user(context, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) kvfree(qp->sq.wrid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) kvfree(qp->rq.wrid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) err_mtt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) mlx4_mtt_cleanup(dev->dev, &qp->mtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) err_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (!qp->umem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ib_umem_release(qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) err_db:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (!udata && qp_has_rq(init_attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) mlx4_db_free(dev->dev, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) kfree(qp->sqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
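/* Map an IB QP state to the corresponding mlx4 hardware QP state (-1 if unknown). */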
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case IB_QPS_RESET: return MLX4_QP_STATE_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) case IB_QPS_INIT: return MLX4_QP_STATE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) case IB_QPS_RTR: return MLX4_QP_STATE_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) case IB_QPS_RTS: return MLX4_QP_STATE_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) case IB_QPS_SQD: return MLX4_QP_STATE_SQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) case IB_QPS_SQE: return MLX4_QP_STATE_SQER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) case IB_QPS_ERR: return MLX4_QP_STATE_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) default: return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
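/*
 * Take both CQ locks in a fixed order (lower CQN first) so that concurrent
 * callers cannot deadlock; when send and receive share one CQ only a single
 * lock is taken, and __acquire() just keeps sparse's annotations balanced.
 */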
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (send_cq == recv_cq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) spin_lock(&send_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) __acquire(&recv_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) spin_lock(&send_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) spin_lock(&recv_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) __releases(&send_cq->lock) __releases(&recv_cq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (send_cq == recv_cq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) __release(&recv_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) spin_unlock(&send_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) spin_unlock(&recv_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) spin_unlock(&send_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) spin_unlock(&send_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) spin_unlock(&recv_cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
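/* Free all GID entries attached to this QP. */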
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static void del_gid_entries(struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct mlx4_ib_gid_entry *ge, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) list_del(&ge->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) kfree(ge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return to_mpd(qp->ibqp.pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
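/*
 * Return the send and receive CQs for a QP: XRC targets take theirs from the
 * XRC domain, XRC initiators use the send CQ for both directions, WQ-sourced
 * QPs (MLX4_IB_RWQ_SRC) use the WQ's CQ for both, and other QPs simply use
 * their own send and receive CQs.
 */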
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) switch (qp->ibqp.qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) case IB_QPT_XRC_TGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) *recv_cq = *send_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) case IB_QPT_XRC_INI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) *send_cq = to_mcq(qp->ibqp.send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) *recv_cq = *send_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) to_mcq(qp->ibwq.cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) *recv_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
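/*
 * Tear down an RSS QP: if it is not already in RESET, drop the usage count
 * of every WQ in its indirection table and move the hardware QP to RESET;
 * then free the mlx4 QP, release its QPN range and free any GID entries.
 */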
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (qp->state != IB_QPS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) wq->rss_usecnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) pr_warn("modify QP %06x to RESET failed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) qp->mqp.qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) mlx4_qp_remove(dev->dev, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) mlx4_qp_free(dev->dev, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) del_gid_entries(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
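/*
 * Common QP teardown: move the hardware QP to RESET (unregistering any MACs
 * and VLANs it still holds), unlink it from the device and CQ lists under
 * the reset-flow lock, clean the CQs for kernel QPs, free the mlx4 QP, and
 * release its QPN, steering entry or WQN range before freeing the buffers,
 * doorbell and GID entries.
 */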
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) enum mlx4_ib_source_type src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) struct mlx4_ib_cq *send_cq, *recv_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (qp->state != IB_QPS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) pr_warn("modify QP %06x to RESET failed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) qp->mqp.qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (qp->pri.smac || qp->pri.smac_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) qp->pri.smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) qp->pri.smac_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (qp->alt.smac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) qp->alt.smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (qp->pri.vid < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) qp->pri.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) qp->pri.candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) qp->pri.update_vid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (qp->alt.vid < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) qp->alt.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) qp->alt.candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) qp->alt.update_vid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) get_cqs(qp, src, &send_cq, &recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) mlx4_ib_lock_cqs(send_cq, recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* del from lists under both locks above to protect reset flow paths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) list_del(&qp->qps_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) list_del(&qp->cq_send_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) list_del(&qp->cq_recv_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (send_cq != recv_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) mlx4_qp_remove(dev->dev, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) mlx4_ib_unlock_cqs(send_cq, recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) mlx4_qp_free(dev->dev, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (qp->flags & MLX4_IB_QP_NETIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) else if (src == MLX4_IB_RWQ_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) mlx4_ib_release_wqn(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct mlx4_ib_ucontext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ibucontext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) qp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) mlx4_mtt_cleanup(dev->dev, &qp->mtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (qp->rq.wqe_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct mlx4_ib_ucontext *mcontext =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct mlx4_ib_ucontext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) mlx4_ib_db_unmap_user(mcontext, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) kvfree(qp->sq.wrid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) kvfree(qp->rq.wrid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) free_proxy_bufs(&dev->ib_dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (qp->rq.wqe_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mlx4_db_free(dev->dev, &qp->db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ib_umem_release(qp->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) del_gid_entries(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
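/*
 * Return the special (SMI/GSI) QP number for the requested port: native
 * devices and the master creating SR-IOV SQPs use the real special QPNs
 * (base_sqpn), while other functions get their per-port proxy QP0/QP1.
 */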
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /* Native or PPF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!mlx4_is_mfunc(dev->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) (mlx4_is_master(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) attr->create_flags & MLX4_IB_SRIOV_SQP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return dev->dev->phys_caps.base_sqpn +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) (attr->qp_type == IB_QPT_SMI ? 0 : 2) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) attr->port_num - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* PF or VF -- creating proxies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (attr->qp_type == IB_QPT_SMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
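/*
 * Validate the requested QP type and create flags, then dispatch either to
 * the RSS creation path (when an RWQ indirection table is given) or to
 * create_qp_common(), resolving the special QP number first for SMI/GSI QPs.
 */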
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct ib_qp_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) u16 xrcdn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (init_attr->rwq_ind_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * Only the create flags masked below are supported (LSO, multicast loopback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * blocking, SR-IOV tunnel/SQP, NETIF, RoCE v2 GSI), and most only for kernel UD QPs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) MLX4_IB_SRIOV_TUNNEL_QP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) MLX4_IB_SRIOV_SQP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) MLX4_IB_QP_NETIF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) MLX4_IB_QP_CREATE_ROCE_V2_GSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (init_attr->qp_type != IB_QPT_UD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (init_attr->create_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (udata && init_attr->create_flags & ~(sup_u_create_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) MLX4_IB_QP_CREATE_ROCE_V2_GSI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) init_attr->qp_type != IB_QPT_UD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) init_attr->qp_type > IB_QPT_GSI) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) init_attr->qp_type != IB_QPT_GSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) switch (init_attr->qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) case IB_QPT_XRC_TGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) pd = to_mxrcd(init_attr->xrcd)->pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) case IB_QPT_XRC_INI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) init_attr->recv_cq = init_attr->send_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) case IB_QPT_RAW_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) case IB_QPT_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) qp->pri.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) qp->alt.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) err = create_qp_common(pd, init_attr, udata, 0, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) qp->ibqp.qp_num = qp->mqp.qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) qp->xrcdn = xrcdn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) case IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) case IB_QPT_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int sqpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 1, 1, &sqpn, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) MLX4_RES_USAGE_DRIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) qp->pri.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) qp->alt.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) err = create_qp_common(pd, init_attr, udata, sqpn, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) qp->port = init_attr->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* Don't support raw QPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct mlx4_ib_dev *dev = to_mdev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct mlx4_ib_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) qp = kzalloc(sizeof(*qp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (!qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) mutex_init(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (init_attr->qp_type == IB_QPT_GSI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct mlx4_ib_sqp *sqp = qp->sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (is_eth &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (IS_ERR(sqp->roce_v2_gsi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) sqp->roce_v2_gsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) to_mqp(sqp->roce_v2_gsi)->flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) MLX4_IB_ROCE_V2_GSI_QP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
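/*
 * Common QP teardown: close the port for QP0, drop the QP1 proxy
 * reference if this QP holds it, free the QP's counter if one was
 * allocated, and destroy either the RSS or the regular QP resources
 * before freeing the driver structures.
 */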
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) struct mlx4_ib_dev *dev = to_mdev(qp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) struct mlx4_ib_qp *mqp = to_mqp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (is_qp0(dev, mqp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) mlx4_CLOSE_PORT(dev->dev, mqp->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) dev->qp1_proxy[mqp->port - 1] == mqp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) dev->qp1_proxy[mqp->port - 1] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (mqp->counter_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) mlx4_ib_free_qp_counter(dev, mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (qp->rwq_ind_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) destroy_qp_rss(dev, mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) kfree(mqp->sqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) kfree(mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
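/*
 * For GSI QPs, destroy the companion RoCEv2 GSI QP (if one was created)
 * before tearing down the main QP.
 */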
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct mlx4_ib_qp *mqp = to_mqp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct mlx4_ib_sqp *sqp = mqp->sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (sqp->roce_v2_gsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) ib_destroy_qp(sqp->roce_v2_gsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return _mlx4_ib_destroy_qp(qp, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
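/*
 * Map the driver QP type onto the firmware service type. Proxy and
 * tunnel types are only valid on multi-function (SR-IOV) devices;
 * -1 indicates an unsupported type.
 */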
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) case MLX4_IB_QPT_RC: return MLX4_QP_ST_RC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) case MLX4_IB_QPT_UC: return MLX4_QP_ST_UC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) case MLX4_IB_QPT_UD: return MLX4_QP_ST_UD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) case MLX4_IB_QPT_XRC_INI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) case MLX4_IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) case MLX4_IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) case MLX4_IB_QPT_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) case MLX4_IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) case MLX4_IB_QPT_PROXY_SMI_OWNER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) case MLX4_IB_QPT_TUN_SMI_OWNER: return (mlx4_is_mfunc(dev->dev) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) MLX4_QP_ST_MLX : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case MLX4_IB_QPT_PROXY_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) case MLX4_IB_QPT_TUN_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) case MLX4_IB_QPT_PROXY_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) case MLX4_IB_QPT_TUN_GSI: return (mlx4_is_mfunc(dev->dev) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) MLX4_QP_ST_UD : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) default: return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
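/*
 * Build the RRE/RAE/RWE access bits for the QP context. Attributes that
 * are not being modified fall back to the values cached in the QP, and
 * a zero responder depth leaves at most remote writes enabled.
 */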
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) int attr_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) u8 dest_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) u32 access_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) u32 hw_access_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) dest_rd_atomic = attr->max_dest_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) dest_rd_atomic = qp->resp_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (attr_mask & IB_QP_ACCESS_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) access_flags = attr->qp_access_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) access_flags = qp->atomic_rd_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (!dest_rd_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) access_flags &= IB_ACCESS_REMOTE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (access_flags & IB_ACCESS_REMOTE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) hw_access_flags |= MLX4_QP_BIT_RRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) hw_access_flags |= MLX4_QP_BIT_RAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (access_flags & IB_ACCESS_REMOTE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) hw_access_flags |= MLX4_QP_BIT_RWE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return cpu_to_be32(hw_access_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) int attr_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (attr_mask & IB_QP_PKEY_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) sqp->pkey_index = attr->pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (attr_mask & IB_QP_QKEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) sqp->qkey = attr->qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (attr_mask & IB_QP_SQ_PSN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) sqp->send_psn = attr->sq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
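/* Encode the (1-based) port number into bit 6 of the schedule queue field. */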
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
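/*
 * Fill a QP context address path from an address handle: path bits, LID,
 * static rate, optional GRH fields and, for RoCE, the VLAN and source MAC
 * bookkeeping (candidates are recorded here and committed only once the
 * modify-qp operation succeeds).
 */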
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int _mlx4_set_path(struct mlx4_ib_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) const struct rdma_ah_attr *ah,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int vidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) int smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (rdma_ah_get_static_rate(ah)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) path->static_rate = rdma_ah_get_static_rate(ah) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) MLX4_STAT_RATE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) --path->static_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) path->static_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) const struct ib_global_route *grh = rdma_ah_read_grh(ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) int real_sgid_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (real_sgid_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return real_sgid_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) pr_err("sgid_index (%u) too large. max is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) path->grh_mylmc |= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) path->mgid_index = real_sgid_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) path->hop_limit = grh->hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) path->tclass_flowlabel =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) cpu_to_be32((grh->traffic_class << 20) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) (grh->flow_label));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) memcpy(path->rgid, grh->dgid.raw, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) path->feup |= MLX4_FEUP_FORCE_ETH_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (vlan_tag < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (smac_info->vid < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) /* both valid vlan ids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (smac_info->vid != vlan_tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* different VIDs. unreg old and reg new */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) smac_info->candidate_vid = vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) smac_info->candidate_vlan_index = vidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) smac_info->candidate_vlan_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) smac_info->update_vid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) path->vlan_index = vidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) path->vlan_index = smac_info->vlan_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* no current vlan tag in qp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) smac_info->candidate_vid = vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) smac_info->candidate_vlan_index = vidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) smac_info->candidate_vlan_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) smac_info->update_vid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) path->vlan_index = vidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) path->fl = 1 << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) } else {
			/* have a current vlan tag; unregister it at modify-qp success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (smac_info->vid < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) smac_info->candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) smac_info->update_vid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
		/* Get the smac_index for RoCE use.
		 * If no SMAC has been assigned yet, register one.
		 * If one was already assigned but the new MAC differs,
		 * unregister the old one and register the new one.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if ((!smac_info->smac && !smac_info->smac_port) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) smac_info->smac != smac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* register candidate now, unreg if needed, after success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) smac_index = mlx4_register_mac(dev->dev, port, smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (smac_index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) smac_info->candidate_smac_index = smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) smac_info->candidate_smac = smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) smac_info->candidate_smac_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) smac_index = smac_info->smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) memcpy(path->dmac, ah->roce.dmac, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) path->ackto = MLX4_IB_LINK_TYPE_ETH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /* put MAC table smac index for IBoE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) path->grh_mylmc = (u8) (smac_index) | 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) enum ib_qp_attr_mask qp_attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) struct mlx4_ib_qp *mqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct mlx4_qp_path *path, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) u16 vlan_id, u8 *smac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) return _mlx4_set_path(dev, &qp->ah_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) mlx4_mac_to_u64(smac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) path, &mqp->pri, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
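/*
 * The alternate path never carries RoCE SMAC/VLAN state, hence the zero
 * MAC and the 0xffff (invalid) VLAN id passed below.
 */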
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) const struct ib_qp_attr *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) enum ib_qp_attr_mask qp_attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct mlx4_ib_qp *mqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) struct mlx4_qp_path *path, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return _mlx4_set_path(dev, &qp->alt_ah_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) path, &mqp->alt, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
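/*
 * Walk the QP's multicast GID list and attach any entries that have not
 * been added yet, recording the port they were attached on.
 */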
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct mlx4_ib_gid_entry *ge, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ge->added = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) ge->port = qp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
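/*
 * For Ethernet UD QPs: if no source MAC has been assigned yet, register
 * the port's current MAC as a candidate and point the primary path at
 * the resulting MAC table index.
 */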
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct mlx4_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct mlx4_qp_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) u64 u64_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) int smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (!qp->pri.smac && !qp->pri.smac_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (smac_index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) qp->pri.candidate_smac_index = smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) qp->pri.candidate_smac = u64_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) qp->pri.candidate_smac_port = qp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
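/*
 * Allocate a dedicated counter used for multicast-loopback source
 * checking. It is only needed when the port is Ethernet, the QP blocks
 * multicast loopback and the device supports the LB_SRC_CHK capability.
 */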
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct counter_index *new_counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) u32 tmp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) IB_LINK_LAYER_ETHERNET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (!new_counter_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) mlx4_counter_free(dev->dev, tmp_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) new_counter_index->index = tmp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) new_counter_index->allocated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) qp->counter_index = new_counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) mutex_lock(&dev->counters_table[qp->port - 1].mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) list_add_tail(&new_counter_index->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) &dev->counters_table[qp->port - 1].counters_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) MLX4_QPC_ROCE_MODE_1 = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) MLX4_QPC_ROCE_MODE_2 = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) switch (gid_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) case IB_GID_TYPE_ROCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return MLX4_QPC_ROCE_MODE_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) case IB_GID_TYPE_ROCE_UDP_ENCAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return MLX4_QPC_ROCE_MODE_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) return MLX4_QPC_ROCE_MODE_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
/*
 * Go over all of the RSS QP's child WQs and apply their HW state according
 * to their logical state, if this RSS QP is the first RSS QP associated
 * with the WQ.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
		/* mlx4_ib restriction:
		 * A WQ is associated with a port according to the RSS QP it
		 * is associated with.
		 * If the WQ is already associated with a different port by
		 * another RSS QP, return a failure.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) wq->port = port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) wq->rss_usecnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (i && err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) for (j = (i - 1); j >= 0; j--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) struct ib_wq *ibwq = ind_tbl->ind_tbl[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if ((wq->rss_usecnt == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) (ibwq->state == IB_WQS_RDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) udata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) pr_warn("failed to reverse WQN=0x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) ibwq->wq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) wq->rss_usecnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
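/*
 * Undo bringup_rss_rwqs(): drop this RSS QP's reference on every child WQ
 * and reset any WQ for which this was the last RSS user.
 */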
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
				pr_warn("failed to reverse WQN=0x%06x\n",
					ibwq->wq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) wq->rss_usecnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
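/*
 * Fill the RSS section of the QP context (located inside the primary path
 * area) with the base/default QPN, flags, Toeplitz hash function and hash
 * key taken from the driver's RSS context.
 */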
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static void fill_qp_rss_context(struct mlx4_qp_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct mlx4_ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct mlx4_rss_context *rss_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) rss_context = (void *)context + offsetof(struct mlx4_qp_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) rss_context->default_qpn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) rss_context->base_qpn_udp = rss_context->default_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) rss_context->flags = qp->rss_ctx->flags;
	/* Currently only the Toeplitz hash function is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) rss_context->hash_fn = MLX4_RSS_HASH_TOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) MLX4_EN_RSS_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
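/*
 * Core modify-QP implementation. The source may be a regular QP or a
 * receive WQ (MLX4_IB_RWQ_SRC), which is handled internally as a
 * RAW_PACKET QP; the QP context is built up field by field according to
 * attr_mask and the requested state transition.
 */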
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) const struct ib_qp_attr *attr, int attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) enum ib_qp_state cur_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) enum ib_qp_state new_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct ib_srq *ibsrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) const struct ib_gid_attr *gid_attr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) struct ib_rwq_ind_table *rwq_ind_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) enum ib_qp_type qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct mlx4_ib_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct mlx4_ib_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct mlx4_ib_pd *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) struct mlx4_ib_cq *send_cq, *recv_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) udata, struct mlx4_ib_ucontext, ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct mlx4_qp_context *context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) enum mlx4_qp_optpar optpar = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) int sqd_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) int steer_qp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) int counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (src_type == MLX4_IB_RWQ_SRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct ib_wq *ibwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) ibwq = (struct ib_wq *)src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) ibsrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) rwq_ind_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) qp_type = IB_QPT_RAW_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) qp = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) dev = to_mdev(ibwq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) pd = to_mpd(ibwq->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct ib_qp *ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) ibqp = (struct ib_qp *)src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ibsrq = ibqp->srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) rwq_ind_tbl = ibqp->rwq_ind_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) qp_type = ibqp->qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) dev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) pd = get_pd(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* APM is not supported under RoCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (attr_mask & IB_QP_ALT_PATH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) IB_LINK_LAYER_ETHERNET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) context = kzalloc(sizeof *context, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (!context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!(attr_mask & IB_QP_PATH_MIG_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) optpar |= MLX4_QP_OPTPAR_PM_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) switch (attr->path_mig_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) case IB_MIG_MIGRATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) case IB_MIG_REARM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) case IB_MIG_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (qp->inl_recv_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) context->param3 |= cpu_to_be32(1 << 25);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) context->param3 |= cpu_to_be32(1 << 29);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) else if (qp_type == IB_QPT_RAW_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) else if (qp_type == IB_QPT_UD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (qp->flags & MLX4_IB_QP_LSO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) context->mtu_msgmax = (IB_MTU_4096 << 5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ilog2(dev->dev->caps.max_gso_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) } else if (attr_mask & IB_QP_PATH_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) pr_err("path MTU (%u) is invalid\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) attr->path_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) context->mtu_msgmax = (attr->path_mtu << 5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ilog2(dev->dev->caps.max_msg_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (qp->rq.wqe_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) context->rq_size_stride |= qp->rq.wqe_shift - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (qp->sq.wqe_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) context->sq_size_stride |= qp->sq.wqe_shift - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (new_state == IB_QPS_RESET && qp->counter_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) mlx4_ib_free_qp_counter(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) context->xrcd = cpu_to_be32((u32) qp->xrcdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (qp_type == IB_QPT_RAW_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) context->param3 |= cpu_to_be32(1 << 30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (ucontext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) context->usr_page = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) context->usr_page = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (attr_mask & IB_QP_DEST_QPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (attr_mask & IB_QP_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) !(attr_mask & IB_QP_AV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) mlx4_set_sched(&context->pri_path, attr->port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) err = create_qp_lb_counter(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) counter_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) dev->counters_table[qp->port - 1].default_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (qp->counter_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) counter_index = qp->counter_index->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (counter_index != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) context->pri_path.counter_index = counter_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) if (qp->counter_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) context->pri_path.fl |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) MLX4_FL_ETH_SRC_CHECK_MC_LB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) context->pri_path.vlan_control |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) context->pri_path.counter_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) MLX4_SINK_COUNTER_INDEX(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (qp->flags & MLX4_IB_QP_NETIF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) mlx4_ib_steer_qp_reg(dev, qp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) steer_qp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (qp_type == IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) context->rlkey_roce_mode |= (qpc_roce_mode << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (attr_mask & IB_QP_PKEY_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) context->pri_path.disable_pkey_check = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) context->pri_path.pkey_index = attr->pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (attr_mask & IB_QP_AV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) u16 vlan = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) u8 smac[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) int is_eth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) gid_attr = attr->ah_attr.grh.sgid_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) err = rdma_read_gid_l2_fields(gid_attr, &vlan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) &smac[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) port_num, vlan, smac))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) MLX4_QP_OPTPAR_SCHED_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (is_eth &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) context->rlkey_roce_mode |= (qpc_roce_mode << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (attr_mask & IB_QP_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) context->pri_path.ackto |= attr->timeout << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (attr_mask & IB_QP_ALT_PATH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (attr->alt_port_num == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) attr->alt_port_num > dev->dev->caps.num_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (attr->alt_pkey_index >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) dev->dev->caps.pkey_table_len[attr->alt_port_num])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) &context->alt_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) attr->alt_port_num))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) context->alt_path.pkey_index = attr->alt_pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) context->alt_path.ackto = attr->alt_timeout << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) context->pd = cpu_to_be32(pd->pdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (!rwq_ind_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) get_cqs(qp, src_type, &send_cq, &recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) } else { /* Set dummy CQs to be compatible with HV and PRM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) recv_cq = send_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) /* Set "fast registration enabled" for all kernel QPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (!ucontext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) context->params1 |= cpu_to_be32(1 << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (attr_mask & IB_QP_RNR_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (attr_mask & IB_QP_RETRY_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (attr->max_rd_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) context->params1 |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) optpar |= MLX4_QP_OPTPAR_SRA_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (attr_mask & IB_QP_SQ_PSN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) context->next_send_psn = cpu_to_be32(attr->sq_psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (attr->max_dest_rd_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) context->params2 |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) optpar |= MLX4_QP_OPTPAR_RRA_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (ibsrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (attr_mask & IB_QP_MIN_RNR_TIMER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (attr_mask & IB_QP_RQ_PSN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (attr_mask & IB_QP_QKEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (qp->mlx4_ib_qp_type &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (mlx4_is_mfunc(dev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY 0x%x (range 0xffff0000..0xffffffff is reserved)\n",
				       attr->qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) context->qkey = cpu_to_be32(attr->qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) optpar |= MLX4_QP_OPTPAR_Q_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (ibsrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) context->srqn = cpu_to_be32(1 << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) to_msrq(ibsrq)->msrq.srqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
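	/*
	 * On the RESET->INIT transition, program the DMA address of the
	 * doorbell record for QPs that own a receive queue.
	 */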
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (qp->rq.wqe_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) cur_state == IB_QPS_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) new_state == IB_QPS_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) context->db_rec_addr = cpu_to_be64(qp->db.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
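	/*
	 * For UD-class QPs (SMI/GSI/UD/raw packet) moving INIT->RTR, derive
	 * the schedule queue from the physical port and the QP0/QP1 role,
	 * and on Ethernet (RoCE) ports resolve the source MAC table index
	 * for UD-type QPs.
	 */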
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (cur_state == IB_QPS_INIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) new_state == IB_QPS_RTR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) context->pri_path.sched_queue = (qp->port - 1) << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) qp->mlx4_ib_qp_type &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) context->pri_path.fl = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) context->pri_path.fl = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) IB_LINK_LAYER_ETHERNET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) context->pri_path.feup = 1 << 7; /* don't fsm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) /* handle smac_index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) err = handle_eth_ud_smac_index(dev, qp, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) dev->qp1_proxy[qp->port - 1] = qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (qp_type == IB_QPT_RAW_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) MLX4_IB_LINK_TYPE_ETH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /* set QP to receive both tunneled & non-tunneled packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (!rwq_ind_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) context->srqn = cpu_to_be32(7 << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
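	/*
	 * For UD QPs on a RoCE port the low bits of the primary path "ackto"
	 * field carry the link type; mark it as Ethernet and flag the
	 * primary address path as modified.
	 */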
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) int is_eth = rdma_port_get_link_layer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) &dev->ib_dev, qp->port) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) IB_LINK_LAYER_ETHERNET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
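	/*
	 * Request the send-queue-drained asynchronous event only for an
	 * RTS->SQD transition where the consumer asked to be notified.
	 */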
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) sqd_event = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) sqd_event = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
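	/*
	 * Kernel QPs leaving RESET get bit 4 of rlkey_roce_mode set, which
	 * appears to enable use of the reserved L_Key by in-kernel consumers.
	 */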
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (!ucontext &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) cur_state == IB_QPS_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) new_state == IB_QPS_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) context->rlkey_roce_mode |= (1 << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * Before passing a kernel QP to the HW, make sure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * ownership bits of the send queue are set and the SQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * headroom is stamped so that the hardware doesn't start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * processing stale work requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (!ucontext &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) cur_state == IB_QPS_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) new_state == IB_QPS_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) struct mlx4_wqe_ctrl_seg *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) for (i = 0; i < qp->sq.wqe_cnt; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) ctrl = get_send_wqe(qp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) ctrl->owner_opcode = cpu_to_be32(1 << 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) ctrl->qpn_vlan.fence_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 1 << (qp->sq.wqe_shift - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) stamp_send_wqe(qp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
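	/*
	 * RSS QPs additionally get their RSS context filled in and the RSS
	 * flag set when they are first taken out of RESET.
	 */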
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (rwq_ind_tbl &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) cur_state == IB_QPS_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) new_state == IB_QPS_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) fill_qp_rss_context(context, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) to_mlx4_state(new_state), context, optpar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) sqd_event, &qp->mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) qp->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
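	/*
	 * The firmware accepted the transition; mirror the newly applied
	 * attributes into the driver's software QP state.
	 */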
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (attr_mask & IB_QP_ACCESS_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) qp->atomic_rd_en = attr->qp_access_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) qp->resp_depth = attr->max_dest_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (attr_mask & IB_QP_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) qp->port = attr->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) update_mcg_macs(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (attr_mask & IB_QP_ALT_PATH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) qp->alt_port = attr->alt_port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (is_sqp(dev, qp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) store_sqp_attrs(qp->sqp, attr, attr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) * If we moved QP0 to RTR, bring the IB link up; if we moved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) * QP0 to RESET or ERROR, bring the link back down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (is_qp0(dev, qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (mlx4_INIT_PORT(dev->dev, qp->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) pr_warn("INIT_PORT failed for port %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) qp->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) mlx4_CLOSE_PORT(dev->dev, qp->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * If we moved a kernel QP to RESET, clean up all old CQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * entries and reinitialize the QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (new_state == IB_QPS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (!ucontext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) ibsrq ? to_msrq(ibsrq) : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (send_cq != recv_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) qp->rq.head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) qp->rq.tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) qp->sq.head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) qp->sq.tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) qp->sq_next_wqe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (qp->rq.wqe_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) *qp->db.db = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (qp->flags & MLX4_IB_QP_NETIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) mlx4_ib_steer_qp_reg(dev, qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) qp->pri.smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) qp->pri.smac_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (qp->alt.smac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) qp->alt.smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (qp->pri.vid < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) qp->pri.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) qp->pri.candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) qp->pri.update_vid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (qp->alt.vid < 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) qp->alt.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) qp->alt.candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) qp->alt.update_vid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (err && qp->counter_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) mlx4_ib_free_qp_counter(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (err && steer_qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) mlx4_ib_steer_qp_reg(dev, qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) kfree(context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (qp->pri.candidate_smac ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) qp->pri.smac = qp->pri.candidate_smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) qp->pri.smac_index = qp->pri.candidate_smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) qp->pri.smac_port = qp->pri.candidate_smac_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) qp->pri.candidate_smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) qp->pri.candidate_smac_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) qp->pri.candidate_smac_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (qp->alt.candidate_smac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (qp->alt.smac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) qp->alt.smac = qp->alt.candidate_smac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) qp->alt.smac_index = qp->alt.candidate_smac_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) qp->alt.smac_port = qp->alt.candidate_smac_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) qp->alt.candidate_smac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) qp->alt.candidate_smac_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) qp->alt.candidate_smac_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) if (qp->pri.update_vid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (qp->pri.candidate_vid < 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) qp->pri.candidate_vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) if (qp->pri.vid < 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) qp->pri.vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) qp->pri.vid = qp->pri.candidate_vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) qp->pri.vlan_port = qp->pri.candidate_vlan_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) qp->pri.vlan_index = qp->pri.candidate_vlan_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) qp->pri.candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) qp->pri.update_vid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (qp->alt.update_vid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (qp->alt.candidate_vid < 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) qp->alt.candidate_vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (qp->alt.vid < 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) qp->alt.vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) qp->alt.vid = qp->alt.candidate_vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) qp->alt.vlan_port = qp->alt.candidate_vlan_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) qp->alt.vlan_index = qp->alt.candidate_vlan_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) qp->alt.candidate_vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) qp->alt.update_vid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
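/* Only the QP state and the port may be modified on an RSS QP. */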
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) IB_QP_PORT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
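/*
 * Validate the requested transition and attribute values against the device
 * limits, fix up the port for bonded and RSS cases, and then apply the
 * change through __mlx4_ib_modify_qp() while holding the QP mutex.
 */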
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) int attr_mask, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) struct mlx4_ib_qp *qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) attr_mask)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified for transition %d to %d. qp_type %d, attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (ibqp->rwq_ind_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) if (!(((cur_state == IB_QPS_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) (new_state == IB_QPS_INIT)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ((cur_state == IB_QPS_INIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) (new_state == IB_QPS_RTR)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) ibqp->qp_num, cur_state, new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) ibqp->qp_num, attr_mask, cur_state, new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) if ((ibqp->qp_type == IB_QPT_RC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) (ibqp->qp_type == IB_QPT_UD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) (ibqp->qp_type == IB_QPT_UC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) (ibqp->qp_type == IB_QPT_XRC_INI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) attr->port_num = mlx4_ib_bond_next_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /* no sense in changing port_num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * when ports are bonded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) attr_mask &= ~IB_QP_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if ((attr_mask & IB_QP_PORT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) IB_LINK_LAYER_ETHERNET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (attr_mask & IB_QP_PKEY_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (cur_state == new_state && cur_state == IB_QPS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) cur_state, new_state, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (ibqp->rwq_ind_tbl && err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) attr->port_num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) mutex_unlock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) int attr_mask, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) struct mlx4_ib_qp *mqp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) struct mlx4_ib_sqp *sqp = mqp->sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (sqp->roce_v2_gsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
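/*
 * On a VF, look up the QKey assigned to a proxy/tunnel QP0 by scanning the
 * per-port special QP numbers reported by the PF.
 */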
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) for (i = 0; i < dev->caps.num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) qpn == dev->caps.spec_qps[i].qp0_tunnel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) *qkey = dev->caps.spec_qps[i].qp0_qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
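/*
 * Build the LRH/BTH/DETH header for QP0 (SMI) traffic sent through the
 * SR-IOV proxy/tunnel special QPs and copy it into the WQE as inline data.
 */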
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) const struct ib_ud_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) void *wqe, unsigned *mlx_seg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct mlx4_ib_sqp *sqp = qp->sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) struct ib_device *ib_dev = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) struct mlx4_wqe_mlx_seg *mlx = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) struct mlx4_ib_ah *ah = to_mah(wr->ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) u16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) u32 qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) int send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) int header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) int spc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) if (wr->wr.opcode != IB_WR_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) send_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) for (i = 0; i < wr->wr.num_sge; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) send_size += wr->wr.sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /* for proxy-qp0 sends, need to add in size of tunnel header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) /* for tunnel-qp0 sends, tunnel header is already in s/g list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) send_size += sizeof (struct mlx4_ib_tunnel_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) sqp->ud_header.lrh.service_level =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) sqp->ud_header.lrh.destination_lid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) cpu_to_be16(ah->av.ib.g_slid & 0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) sqp->ud_header.lrh.source_lid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) cpu_to_be16(ah->av.ib.g_slid & 0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) /* force loopback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) mlx->rlid = sqp->ud_header.lrh.destination_lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) sqp->ud_header.lrh.virtual_lane = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) sqp->ud_header.bth.destination_qpn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (mlx4_is_master(mdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) sqp->ud_header.immediate_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) * Inline data segments may not cross a 64 byte boundary. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) * our UD header is bigger than the space available up to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) * next 64 byte boundary in the WQE, use two inline data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * segments to hold the UD header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) spc = MLX4_INLINE_ALIGN -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (header_size <= spc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) inl->byte_count = cpu_to_be32(1 << 31 | header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) memcpy(inl + 1, sqp->header_buf, header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) inl->byte_count = cpu_to_be32(1 << 31 | spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) memcpy(inl + 1, sqp->header_buf, spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) inl = (void *) (inl + 1) + spc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) * Need a barrier here to make sure all the data is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) * visible before the byte_count field is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) * Otherwise the HCA prefetcher could grab the 64-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * chunk with this inline segment and get a valid (!=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) * 0xffffffff) byte count but stale data, and end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) * generating a packet with bad headers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) * The first inline segment's byte_count field doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * need a barrier, because it comes after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) * control/MLX segment and therefore is at an offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) * of 16 mod 64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) i = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) *mlx_seg_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
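/*
 * Look up the cached SL->VL mapping for a port. The table is kept as a
 * single 64-bit value with two 4-bit VL entries per byte; out-of-range SLs
 * map to the invalid VL 0xf.
 */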
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) union sl2vl_tbl_to_u64 tmp_vltab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) u8 vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (sl > 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) return 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) vl = tmp_vltab.sl8[sl >> 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (sl & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) vl &= 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) vl >>= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) return vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
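/*
 * Read the GID at a raw hardware table index (which may differ from the
 * ib_core index) under the iboe lock; a zero GID means the entry is unused.
 */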
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) int index, union ib_gid *gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) enum ib_gid_type *gid_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct mlx4_ib_iboe *iboe = &ibdev->iboe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) struct mlx4_port_gid_table *port_gid_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) port_gid_table = &iboe->gids[port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) spin_lock_irqsave(&iboe->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) *gid_type = port_gid_table->gids[index].gid_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) spin_unlock_irqrestore(&iboe->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) if (rdma_is_zero_gid(gid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
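/*
 * Build the UD header for GSI (QP1) sends. Depending on the destination this
 * is a plain IB LRH/BTH/DETH header, a RoCE v1 header with GRH, or a RoCE v2
 * header with IPv4/IPv6 + UDP; MLX4_ROCEV2_QP1_SPORT is the fixed UDP source
 * port used for RoCE v2 QP1 packets.
 */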
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) #define MLX4_ROCEV2_QP1_SPORT 0xC000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) void *wqe, unsigned *mlx_seg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) struct mlx4_ib_sqp *sqp = qp->sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) struct ib_device *ib_dev = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) struct mlx4_wqe_mlx_seg *mlx = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) struct mlx4_wqe_ctrl_seg *ctrl = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) struct mlx4_ib_ah *ah = to_mah(wr->ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) union ib_gid sgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) u16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) int send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) int header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) int spc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) u16 vlan = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) bool is_eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) bool is_vlan = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) bool is_grh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) bool is_udp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) int ip_version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) send_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) for (i = 0; i < wr->wr.num_sge; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) send_size += wr->wr.sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) is_grh = mlx4_ib_ah_grh_present(ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) enum ib_gid_type gid_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* When multi-function is enabled, the ib_core gid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) * indexes don't necessarily match the hw ones, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) * we must use our own cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) be32_to_cpu(ah->av.ib.port_pd) >> 24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) ah->av.ib.gid_index, &sgid.raw[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) err = fill_gid_by_hw_index(ibdev, qp->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ah->av.ib.gid_index, &sgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) &gid_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (is_udp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) ip_version = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) ip_version = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) is_grh = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) is_vlan = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) ip_version, is_udp, 0, &sqp->ud_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) if (!is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) sqp->ud_header.lrh.service_level =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (is_grh || (ip_version == 6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) sqp->ud_header.grh.traffic_class =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) sqp->ud_header.grh.flow_label =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) /* When multi-function is enabled, the ib_core gid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) * indexes don't necessarily match the hw ones, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) * we must use our own cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) sqp->ud_header.grh.source_gid.global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) .subnet_prefix =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) cpu_to_be64(atomic64_read(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) &(to_mdev(ib_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) ->sriov
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) .demux[qp->port - 1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) .subnet_prefix)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) sqp->ud_header.grh.source_gid.global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) .interface_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) to_mdev(ib_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) ->sriov.demux[qp->port - 1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) .guid_cache[ah->av.ib.gid_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) sqp->ud_header.grh.source_gid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) ah->ibah.sgid_attr->gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) memcpy(sqp->ud_header.grh.destination_gid.raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) ah->av.ib.dgid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) if (ip_version == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) sqp->ud_header.ip4.tos =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) sqp->ud_header.ip4.id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) sqp->ud_header.ip4.frag_off = htons(IP_DF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) memcpy(&sqp->ud_header.ip4.saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) sgid.raw + 12, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (is_udp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) sqp->ud_header.udp.csum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (!is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) mlx->flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) (sqp->ud_header.lrh.destination_lid ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) IB_LID_PERMISSIVE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) MLX4_WQE_MLX_SLR :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) (sqp->ud_header.lrh.service_level << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) mlx->flags |= cpu_to_be32(0x1); /* force loopback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) mlx->rlid = sqp->ud_header.lrh.destination_lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) switch (wr->wr.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) sqp->ud_header.immediate_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) sqp->ud_header.immediate_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) if (is_eth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) struct in6_addr in6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) u16 ether_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) ether_type = (!is_udp) ? ETH_P_IBOE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) (ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) mlx->sched_prio = cpu_to_be16(pcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) memcpy(&in6, sgid.raw, sizeof(in6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (!is_vlan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) sqp->ud_header.eth.type = cpu_to_be16(ether_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) sqp->ud_header.lrh.virtual_lane =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) !qp->ibqp.qp_num ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 15 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) sl_to_vl(to_mdev(ib_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) sqp->ud_header.lrh.service_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) qp->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) if (!qp->ibqp.qp_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) &pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) &pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) sqp->qkey : wr->remote_qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) pr_err("built UD header of size %d:\n", header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) for (i = 0; i < header_size / 4; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (i % 8 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) pr_err(" [%02x] ", i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) pr_cont(" %08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if ((i + 1) % 8 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) pr_err("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * Inline data segments may not cross a 64 byte boundary. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) * our UD header is bigger than the space available up to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) * next 64 byte boundary in the WQE, use two inline data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) * segments to hold the UD header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) spc = MLX4_INLINE_ALIGN -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (header_size <= spc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) inl->byte_count = cpu_to_be32(1 << 31 | header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) memcpy(inl + 1, sqp->header_buf, header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) inl->byte_count = cpu_to_be32(1 << 31 | spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) memcpy(inl + 1, sqp->header_buf, spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) inl = (void *) (inl + 1) + spc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) * Need a barrier here to make sure all the data is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * visible before the byte_count field is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) * Otherwise the HCA prefetcher could grab the 64-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) * chunk with this inline segment and get a valid (!=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) * 0xffffffff) byte count but stale data, and end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) * generating a packet with bad headers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) * The first inline segment's byte_count field doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) * need a barrier, because it comes after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) * control/MLX segment and therefore is at an offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) * of 16 mod 64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) i = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) *mlx_seg_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
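/*
 * Check whether posting @nreq more work requests would overflow the work
 * queue.  The unlocked head - tail comparison handles the common case; if
 * the queue looks full, the indices are re-read under the completion
 * queue's lock so the count is consistent with concurrent CQ polling.
 */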
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) unsigned cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) struct mlx4_ib_cq *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) cur = wq->head - wq->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) if (likely(cur + nreq < wq->max_post))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) cq = to_mcq(ib_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) spin_lock(&cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) cur = wq->head - wq->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) spin_unlock(&cq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) return cur + nreq >= wq->max_post;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
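/*
 * Translate IB access flags into the mlx4 FMR/bind permission bits;
 * local read access is always granted.
 */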
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) static __be32 convert_access(int acc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) return (acc & IB_ACCESS_REMOTE_ATOMIC ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) (acc & IB_ACCESS_REMOTE_WRITE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) (acc & IB_ACCESS_REMOTE_READ ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
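/*
 * Build the fast-registration segment for an IB_WR_REG_MR work request
 * from the MR's page list, iova, length and page size.
 */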
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) const struct ib_reg_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) struct mlx4_ib_mr *mr = to_mmr(wr->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) fseg->flags = convert_access(wr->access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) fseg->mem_key = cpu_to_be32(wr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) fseg->buf_list = cpu_to_be64(mr->page_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) fseg->start_addr = cpu_to_be64(mr->ibmr.iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) fseg->reg_len = cpu_to_be64(mr->ibmr.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) fseg->offset = 0; /* XXX -- is this just for ZBVA? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) fseg->reserved[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) fseg->reserved[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) memset(iseg, 0, sizeof(*iseg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) iseg->mem_key = cpu_to_be32(rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) u64 remote_addr, u32 rkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) rseg->raddr = cpu_to_be64(remote_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) rseg->rkey = cpu_to_be32(rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) rseg->reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
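/*
 * Fill the atomic segment: compare-and-swap carries the swap and compare
 * operands, masked fetch-and-add carries the add value and its mask, and
 * plain fetch-and-add carries only the add value.
 */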
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) const struct ib_atomic_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) aseg->swap_add = cpu_to_be64(wr->swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) aseg->compare = cpu_to_be64(wr->compare_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) aseg->swap_add = cpu_to_be64(wr->compare_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) aseg->compare = cpu_to_be64(wr->compare_add_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) aseg->swap_add = cpu_to_be64(wr->compare_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) aseg->compare = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
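/*
 * Masked compare-and-swap uses the wider segment that also carries a mask
 * for each of the swap and compare operands.
 */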
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) const struct ib_atomic_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) aseg->swap_add = cpu_to_be64(wr->swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) aseg->compare = cpu_to_be64(wr->compare_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
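/*
 * Build a UD datagram segment: copy the address vector from the AH and
 * set the remote QPN and Q_Key, plus the VLAN/MAC fields used on RoCE.
 */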
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) const struct ib_ud_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) dseg->dqpn = cpu_to_be32(wr->remote_qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) dseg->qkey = cpu_to_be32(wr->remote_qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
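/*
 * Datagram segment for proxy (SR-IOV) special QPs: rewrite the address
 * vector to force loopback and drop the GRH, direct the packet to the
 * port's QP0/QP1 tunnel QP, and take the Q_Key from the QP context.
 */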
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) struct mlx4_wqe_datagram_seg *dseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) const struct ib_ud_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) enum mlx4_ib_qp_type qpt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) union mlx4_ext_av *av = &to_mah(wr->ah)->av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) struct mlx4_av sqp_av = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) int port = *((u8 *) &av->ib.port_pd) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) /* force loopback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) cpu_to_be32(0xf0000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (qpt == MLX4_IB_QPT_PROXY_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) /* Use QKEY from the QP context, which is set by master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
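/*
 * Write the tunnel header (address vector, remote QPN, P_Key index, Q_Key,
 * MAC and VLAN) into the WQE as inline data, split across two inline
 * segments when it would cross a 64-byte (MLX4_INLINE_ALIGN) boundary.
 */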
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) unsigned *mlx_seg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) struct mlx4_wqe_inline_seg *inl = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) struct mlx4_ib_tunnel_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) struct mlx4_ib_ah *ah = to_mah(wr->ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) int spc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) memcpy(&hdr.av, &ah->av, sizeof hdr.av);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) hdr.pkey_index = cpu_to_be16(wr->pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) hdr.qkey = cpu_to_be32(wr->remote_qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) memcpy(hdr.mac, ah->av.eth.mac, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) hdr.vlan = ah->av.eth.vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) spc = MLX4_INLINE_ALIGN -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) if (sizeof (hdr) <= spc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) memcpy(inl + 1, &hdr, sizeof (hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) memcpy(inl + 1, &hdr, spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) inl->byte_count = cpu_to_be32(1 << 31 | spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) inl = (void *) (inl + 1) + spc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) i = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) *mlx_seg_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) static void set_mlx_icrc_seg(void *dseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) u32 *t = dseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) struct mlx4_wqe_inline_seg *iseg = dseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) t[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) * Need a barrier here before writing the byte_count field to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * make sure that all the data is visible before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) * byte_count field is set. Otherwise, if the segment begins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) * a new cacheline, the HCA prefetcher could grab the 64-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) * chunk and get a valid (!= 0xffffffff) byte count but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) * stale data, and end up sending the wrong data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) iseg->byte_count = cpu_to_be32((1 << 31) | 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) dseg->lkey = cpu_to_be32(sg->lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) dseg->addr = cpu_to_be64(sg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) * Need a barrier here before writing the byte_count field to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) * make sure that all the data is visible before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) * byte_count field is set. Otherwise, if the segment begins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) * a new cacheline, the HCA prefetcher could grab the 64-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) * chunk and get a valid (!= 0xffffffff) byte count but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) * stale data, and end up sending the wrong data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) dseg->byte_count = cpu_to_be32(sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
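/*
 * Like set_data_seg() but without the write barrier; used for receive
 * WQEs, which are only handed to the hardware by the doorbell record
 * update after the whole chain has been written.
 */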
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) dseg->byte_count = cpu_to_be32(sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) dseg->lkey = cpu_to_be32(sg->lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) dseg->addr = cpu_to_be64(sg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
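/*
 * Copy the LSO header into the WQE and return its 16-byte-aligned length.
 * Sets *blh (folded into the WQE owner/opcode word) when the header spills
 * past one cache line, and rejects the request when a QP created without
 * the LSO flag would not have enough room left for the scatter list.
 */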
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) *blh = cpu_to_be32(1 << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) memcpy(wqe->header, wr->header, wr->hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) *lso_seg_len = halign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
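/*
 * Immediate/invalidate dword for the control segment: immediate data for
 * *_WITH_IMM opcodes, the rkey to invalidate for SEND_WITH_INV, zero
 * otherwise.
 */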
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) static __be32 send_ieth(const struct ib_send_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) return wr->ex.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) return cpu_to_be32(wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
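/*
 * Write a zero-length inline segment, used as 16 bytes of padding so that
 * the tunnel header that follows starts on a cache-line boundary.
 */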
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static void add_zero_len_inline(void *wqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) struct mlx4_wqe_inline_seg *inl = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) memset(wqe, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) inl->byte_count = cpu_to_be32(1 << 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
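/*
 * Post a chain of send work requests.  Each WQE is built as a control
 * segment, the transport-specific segments (RDMA/atomic, UD datagram,
 * tunnel or MLX headers), and the data segments written in reverse order;
 * the owner/opcode byte is written last and the send doorbell is rung once
 * for the whole chain.
 */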
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) const struct ib_send_wr **bad_wr, bool drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) struct mlx4_ib_qp *qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) void *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) struct mlx4_wqe_ctrl_seg *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) struct mlx4_wqe_data_seg *dseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) int nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) unsigned ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) unsigned seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) __be32 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) __be32 *lso_wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) __be32 lso_hdr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) __be32 blh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) struct mlx4_ib_sqp *sqp = qp->sqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) if (sqp->roce_v2_gsi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) enum ib_gid_type gid_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) union ib_gid gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) if (!fill_gid_by_hw_index(mdev, qp->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) ah->av.ib.gid_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) &gid, &gid_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) to_mqp(sqp->roce_v2_gsi) : qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) ah->av.ib.gid_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) spin_lock_irqsave(&qp->sq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) !drain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) nreq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) ind = qp->sq_next_wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) for (nreq = 0; wr; ++nreq, wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) lso_wqe = &dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) blh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (unlikely(wr->num_sge > qp->sq.max_gs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) ctrl->srcrb_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) (wr->send_flags & IB_SEND_SIGNALED ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) (wr->send_flags & IB_SEND_SOLICITED ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) ((wr->send_flags & IB_SEND_IP_CSUM) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) qp->sq_signal_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) ctrl->imm = send_ieth(wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) wqe += sizeof *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) size = sizeof *ctrl / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) switch (qp->mlx4_ib_qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) case MLX4_IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) case MLX4_IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) case IB_WR_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) case IB_WR_ATOMIC_FETCH_AND_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) atomic_wr(wr)->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) wqe += sizeof (struct mlx4_wqe_raddr_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) set_atomic_seg(wqe, atomic_wr(wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) wqe += sizeof (struct mlx4_wqe_atomic_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) size += (sizeof (struct mlx4_wqe_raddr_seg) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) sizeof (struct mlx4_wqe_atomic_seg)) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) atomic_wr(wr)->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) wqe += sizeof (struct mlx4_wqe_raddr_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) set_masked_atomic_seg(wqe, atomic_wr(wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) size += (sizeof (struct mlx4_wqe_raddr_seg) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) rdma_wr(wr)->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) wqe += sizeof (struct mlx4_wqe_raddr_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) ctrl->srcrb_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) wqe += sizeof (struct mlx4_wqe_local_inval_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) ctrl->srcrb_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) set_reg_seg(wqe, reg_wr(wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) wqe += sizeof(struct mlx4_wqe_fmr_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) /* No extra segments required for sends */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) case MLX4_IB_QPT_TUN_SMI_OWNER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) &seglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) wqe += seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) size += seglen / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) case MLX4_IB_QPT_TUN_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) case MLX4_IB_QPT_TUN_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) /* this is a UD qp used in MAD responses to slaves. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) set_datagram_seg(wqe, ud_wr(wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) /* set the forced-loopback bit in the data seg av */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) *(__be32 *) wqe |= cpu_to_be32(0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) wqe += sizeof (struct mlx4_wqe_datagram_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) case MLX4_IB_QPT_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) set_datagram_seg(wqe, ud_wr(wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) wqe += sizeof (struct mlx4_wqe_datagram_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) if (wr->opcode == IB_WR_LSO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) &lso_hdr_sz, &blh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) lso_wqe = (__be32 *) wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) wqe += seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) size += seglen / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) case MLX4_IB_QPT_PROXY_SMI_OWNER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) &seglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) wqe += seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) size += seglen / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) /* to start tunnel header on a cache-line boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) add_zero_len_inline(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) wqe += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) build_tunnel_header(ud_wr(wr), wqe, &seglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) wqe += seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) size += seglen / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) case MLX4_IB_QPT_PROXY_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) case MLX4_IB_QPT_PROXY_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) /* If we are tunneling special qps, this is a UD qp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) * In this case we first add a UD segment targeting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) * the tunnel qp, and then add a header with address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) * information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) ud_wr(wr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) qp->mlx4_ib_qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) wqe += sizeof (struct mlx4_wqe_datagram_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) build_tunnel_header(ud_wr(wr), wqe, &seglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) wqe += seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) size += seglen / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) case MLX4_IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) case MLX4_IB_QPT_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) wqe += seglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) size += seglen / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) * Write data segments in reverse order, so as to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) * overwrite cacheline stamp last within each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) * cacheline. This avoids issues with WQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) * prefetching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) dseg = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) dseg += wr->num_sge - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) /* Add one more inline data segment for ICRC for MLX sends */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) qp->mlx4_ib_qp_type &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) set_mlx_icrc_seg(dseg + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) size += sizeof (struct mlx4_wqe_data_seg) / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) set_data_seg(dseg, wr->sg_list + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) * Possibly overwrite stamping in cacheline with LSO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) * segment only after making sure all data segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) * are written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) *lso_wqe = lso_hdr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) MLX4_WQE_CTRL_FENCE : 0) | size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) * Make sure descriptor is fully written before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) * setting ownership bit (because HW can start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) * executing as soon as we do).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) * We can improve latency by not stamping the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) * send queue WQE until after ringing the doorbell, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) * only stamp here if there are still more WQEs to post.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) if (wr->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) stamp_send_wqe(qp, ind + qp->sq_spare_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) ind++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) if (likely(nreq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) qp->sq.head += nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) * Make sure that descriptors are written before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) * doorbell record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) writel_relaxed(qp->doorbell_qpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) qp->sq_next_wqe = ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) spin_unlock_irqrestore(&qp->sq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
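/*
 * Post a chain of receive work requests.  Proxy QPs get an extra scatter
 * entry pointing at the tunnel header buffer; any unused scatter slot is
 * terminated with MLX4_INVALID_LKEY.  The receive doorbell record is
 * updated once after the whole chain has been written.
 */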
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) const struct ib_recv_wr **bad_wr, bool drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) struct mlx4_ib_qp *qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) struct mlx4_wqe_data_seg *scat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) int nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) int ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) int max_gs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) max_gs = qp->rq.max_gs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) spin_lock_irqsave(&qp->rq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) !drain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) nreq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) for (nreq = 0; wr; ++nreq, wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) if (unlikely(wr->num_sge > qp->rq.max_gs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) scat = get_recv_wqe(qp, ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) ib_dma_sync_single_for_device(ibqp->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) qp->sqp_proxy_rcv[ind].map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) sizeof (struct mlx4_ib_proxy_sqp_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) scat->byte_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) /* use dma lkey from upper layer entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) scat->lkey = cpu_to_be32(wr->sg_list->lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) scat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) max_gs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) for (i = 0; i < wr->num_sge; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) __set_data_seg(scat + i, wr->sg_list + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) if (i < max_gs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) scat[i].byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) scat[i].addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) qp->rq.wrid[ind] = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) if (likely(nreq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) qp->rq.head += nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) * Make sure that descriptors are written before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) * doorbell record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) spin_unlock_irqrestore(&qp->rq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) switch (mlx4_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) case MLX4_QP_STATE_RST: return IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) case MLX4_QP_STATE_INIT: return IB_QPS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) case MLX4_QP_STATE_RTR: return IB_QPS_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) case MLX4_QP_STATE_RTS: return IB_QPS_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) case MLX4_QP_STATE_SQ_DRAINING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) case MLX4_QP_STATE_SQD: return IB_QPS_SQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) case MLX4_QP_STATE_SQER: return IB_QPS_SQE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) case MLX4_QP_STATE_ERR: return IB_QPS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) default: return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) switch (mlx4_mig_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) case MLX4_QP_PM_ARMED: return IB_MIG_ARMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) case MLX4_QP_PM_REARM: return IB_MIG_REARM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) default: return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) static int to_ib_qp_access_flags(int mlx4_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) int ib_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) if (mlx4_flags & MLX4_QP_BIT_RRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) ib_flags |= IB_ACCESS_REMOTE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) if (mlx4_flags & MLX4_QP_BIT_RWE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) ib_flags |= IB_ACCESS_REMOTE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) if (mlx4_flags & MLX4_QP_BIT_RAE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) return ib_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
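/*
 * Decode a hardware mlx4_qp_path back into an rdma_ah_attr (the reverse of
 * the encoding done on the modify-QP path): bit 6 of sched_queue selects the
 * port; for IB the SL sits in sched_queue[5:2], while for RoCE SL[2:0] comes
 * from sched_queue[5:3] and SL[3] from sched_queue[2].  A GRH is present only
 * when bit 7 of grh_mylmc is set, with the LMC path bits in grh_mylmc[6:0].
 */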
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) struct rdma_ah_attr *ah_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) struct mlx4_qp_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) struct mlx4_dev *dev = ibdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) memset(ah_attr, 0, sizeof(*ah_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) if (port_num == 0 || port_num > dev->caps.num_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) ((path->sched_queue & 4) << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) rdma_ah_set_port_num(ah_attr, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) rdma_ah_set_static_rate(ah_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) path->static_rate ? path->static_rate - 5 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) if (path->grh_mylmc & (1 << 7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) rdma_ah_set_grh(ah_attr, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) path->mgid_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) path->hop_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) (be32_to_cpu(path->tclass_flowlabel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) >> 20) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) rdma_ah_set_dgid_raw(ah_attr, path->rgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
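/*
 * Query the current attributes of a QP.  A QP in the RESET state is reported
 * from the driver's cached state without touching firmware; otherwise the QP
 * context is read with mlx4_qp_query() and translated field by field into
 * ib_qp_attr/ib_qp_init_attr.  Send queue capabilities are reported only for
 * kernel QPs, since the send queue of a userspace QP is owned by userspace.
 */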
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) struct ib_qp_init_attr *qp_init_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) struct mlx4_ib_qp *qp = to_mqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) struct mlx4_qp_context context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) int mlx4_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) if (ibqp->rwq_ind_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) mutex_lock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) if (qp->state == IB_QPS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) qp_attr->qp_state = IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) mlx4_state = be32_to_cpu(context.flags) >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) qp->state = to_ib_qp_state(mlx4_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) qp_attr->qp_state = qp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) qp_attr->path_mtu = context.mtu_msgmax >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) qp_attr->path_mig_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) qp_attr->qkey = be32_to_cpu(context.qkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) qp_attr->qp_access_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) to_ib_qp_access_flags(be32_to_cpu(context.params2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) qp_attr->alt_port_num =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) if (qp_attr->qp_state == IB_QPS_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) qp_attr->port_num = qp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) qp_attr->max_dest_rd_atomic =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) qp_attr->min_rnr_timer =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) qp_attr->timeout = context.pri_path.ackto >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) qp_attr->alt_timeout = context.alt_path.ackto >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) qp_attr->cur_qp_state = qp_attr->qp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) qp_attr->cap.max_recv_sge = qp->rq.max_gs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) if (!ibqp->uobject) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) qp_attr->cap.max_send_sge = qp->sq.max_gs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) qp_attr->cap.max_send_wr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) qp_attr->cap.max_send_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) * We don't support inline sends for kernel QPs (yet), and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) * don't know what userspace's value should be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) qp_attr->cap.max_inline_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) qp_init_attr->cap = qp_attr->cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) qp_init_attr->create_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (qp->flags & MLX4_IB_QP_LSO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) if (qp->flags & MLX4_IB_QP_NETIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) qp_init_attr->sq_sig_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) mutex_unlock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
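/*
 * Create a receive work queue.  mlx4 implements a WQ on top of a RAW_PACKET
 * QP that uses only its receive side: the WQ attributes are translated into
 * an ib_qp_init_attr and handed to create_rq(), with the WQ's CQ also wired
 * up as a dummy send CQ.
 */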
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) struct ib_wq_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) struct mlx4_dev *dev = to_mdev(pd->device)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) struct ib_qp_init_attr ib_qp_init_attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) struct mlx4_ib_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) struct mlx4_ib_create_wq ucmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) int err, required_cmd_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) if (!udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) sizeof(ucmd.comp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) if (udata->inlen < required_cmd_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) pr_debug("invalid inlen\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (udata->inlen > sizeof(ucmd) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) !ib_is_udata_cleared(udata, sizeof(ucmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) udata->inlen - sizeof(ucmd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) pr_debug("unsupported non-zero data beyond the command struct\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) if (udata->outlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) if (init_attr->wq_type != IB_WQT_RQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) pr_debug("unsupported wq type %d\n", init_attr->wq_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) pr_debug("unsupported create_flags %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) init_attr->create_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) qp = kzalloc(sizeof(*qp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) if (!qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) mutex_init(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) qp->pri.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) qp->alt.vid = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) ib_qp_init_attr.qp_context = init_attr->wq_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) ib_qp_init_attr.recv_cq = init_attr->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) err = create_rq(pd, &ib_qp_init_attr, udata, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) qp->ibwq.event_handler = init_attr->event_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) qp->ibwq.wq_num = qp->mqp.qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) qp->ibwq.state = IB_WQS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) return &qp->ibwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) static int ib_wq2qp_state(enum ib_wq_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) case IB_WQS_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) return IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) case IB_WQS_RDY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) return IB_QPS_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) return IB_QPS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164)
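/*
 * Apply a WQ state change to the underlying HW QP.  Moving to RDY requires
 * the QP to go RESET -> INIT -> RTR, so the INIT step (which programs the
 * port) is applied first; if the final transition then fails, the QP is
 * moved back to RESET so that the recorded state matches the hardware.
 */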
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) enum ib_qp_state qp_cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) enum ib_qp_state qp_new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) int attr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) /* ib_qp.state represents the WQ HW state, while ib_wq.state represents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) * the WQ logical state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) qp_cur_state = qp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) qp_new_state = ib_wq2qp_state(new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) if (ib_wq2qp_state(new_state) == qp_cur_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) if (new_state == IB_WQS_RDY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) struct ib_qp_attr attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) attr.port_num = qp->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) attr_mask = IB_QP_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) attr_mask, IB_QPS_RESET, IB_QPS_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) ibwq->wq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) qp_cur_state = IB_QPS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) attr_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) qp_cur_state, qp_new_state, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) if (err && (qp_cur_state == IB_QPS_INIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) qp_new_state = IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) attr_mask, IB_QPS_INIT, IB_QPS_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) udata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) pr_warn("WQN=0x%06x failed to revert the HW QP back to RESET\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) ibwq->wq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) qp_new_state = IB_QPS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) qp->state = qp_new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) u32 wq_attr_mask, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) struct mlx4_ib_modify_wq ucmd = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) size_t required_cmd_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) enum ib_wq_state cur_state, new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) required_cmd_sz = offsetof(typeof(ucmd), reserved) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) sizeof(ucmd.reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) if (udata->inlen < required_cmd_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) if (udata->inlen > sizeof(ucmd) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) !ib_is_udata_cleared(udata, sizeof(ucmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) udata->inlen - sizeof(ucmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) if (ucmd.comp_mask || ucmd.reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) if (wq_attr_mask & IB_WQ_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) cur_state = wq_attr->curr_wq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) new_state = wq_attr->wq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) /* Need to protect against the parent RSS QP, which may also modify the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) * WQ state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) mutex_lock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) /* The HW state can be updated only if an RSS QP has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) * associated with this WQ, so that its port can be applied to the WQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) if (qp->rss_usecnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) err = _mlx4_ib_modify_wq(ibwq, new_state, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) ibwq->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) mutex_unlock(&qp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) if (qp->counter_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) mlx4_ib_free_qp_counter(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290)
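/*
 * Nothing is programmed into the hardware at this point: the RSS QP context
 * is later set up with just a base WQN and a table size, so this only
 * validates that the indirection table fits the device's RSS caps and that
 * the member WQNs form a consecutive range whose base is aligned to the
 * table size.
 */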
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) struct ib_rwq_ind_table_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) struct ib_device *device = rwq_ind_table->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) unsigned int base_wqn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) size_t min_resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) if (udata->inlen > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) !ib_is_udata_cleared(udata, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) udata->inlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) if (udata->outlen && udata->outlen < min_resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) if (ind_tbl_size >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) device->attrs.rss_caps.max_rwq_indirection_table_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) pr_debug("ind_tbl_size = %d is bigger than supported = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) ind_tbl_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) device->attrs.rss_caps.max_rwq_indirection_table_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) base_wqn = init_attr->ind_tbl[0]->wq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) if (base_wqn % ind_tbl_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) base_wqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) for (i = 1; i < ind_tbl_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) pr_debug("indirection table's WQNs aren't consecutive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) if (udata->outlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) resp.response_length = offsetof(typeof(resp), response_length) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) sizeof(resp.response_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) err = ib_copy_to_udata(udata, &resp, resp.response_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342)
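/*
 * QP drain support: the QP is first moved to the error state, a marker WR
 * carrying its own completion callback is posted on the queue being drained,
 * and the caller then blocks until that WR completes, at which point every
 * previously posted WR has been flushed.  handle_drain_completion() also
 * copes with directly polled CQs and with a device in internal-error state,
 * where the CQ handler has to be kicked by hand.
 */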
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) struct mlx4_ib_drain_cqe {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) struct ib_cqe cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) struct completion done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) struct mlx4_ib_drain_cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) complete(&cqe->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) /* This function returns only once the drain WR has completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) static void handle_drain_completion(struct ib_cq *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) struct mlx4_ib_drain_cqe *sdrain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) struct mlx4_ib_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) struct mlx4_dev *mdev = dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) if (cq->poll_ctx == IB_POLL_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) ib_process_cq_direct(cq, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) struct mlx4_ib_cq *mcq = to_mcq(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) bool triggered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) /* Make sure that the CQ handler won't run if it hasn't run yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (!mcq->mcq.reset_notify_added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) mcq->mcq.reset_notify_added = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) triggered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) if (triggered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) /* Wait for any scheduled/running task to finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) switch (cq->poll_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) case IB_POLL_SOFTIRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) irq_poll_disable(&cq->iop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) irq_poll_enable(&cq->iop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) case IB_POLL_WORKQUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) cancel_work_sync(&cq->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) /* Run the CQ handler - this makes sure that the drain WR will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) * be processed if it wasn't processed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) mcq->mcq.comp(&mcq->mcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) wait_for_completion(&sdrain->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) void mlx4_ib_drain_sq(struct ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) struct ib_cq *cq = qp->send_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) struct mlx4_ib_drain_cqe sdrain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) const struct ib_send_wr *bad_swr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) struct ib_rdma_wr swr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) .wr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) .next = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) { .wr_cqe = &sdrain.cqe, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) .opcode = IB_WR_RDMA_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) struct mlx4_ib_dev *dev = to_mdev(qp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) struct mlx4_dev *mdev = dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) sdrain.cqe.done = mlx4_ib_drain_qp_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) init_completion(&sdrain.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) handle_drain_completion(cq, &sdrain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) void mlx4_ib_drain_rq(struct ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) struct ib_cq *cq = qp->recv_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) struct mlx4_ib_drain_cqe rdrain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) struct ib_recv_wr rwr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) const struct ib_recv_wr *bad_rwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) struct mlx4_ib_dev *dev = to_mdev(qp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) struct mlx4_dev *mdev = dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) rwr.wr_cqe = &rdrain.cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) rdrain.cqe.done = mlx4_ib_drain_qp_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) init_completion(&rdrain.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) handle_drain_completion(cq, &rdrain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }
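/*
 * Note: these drain helpers are normally reached through the core verbs layer
 * rather than called directly.  Assuming the driver registers them as its
 * drain_sq/drain_rq device ops, a ULP doing roughly
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *
 * ends up in mlx4_ib_drain_sq()/mlx4_ib_drain_rq() above before the QP is
 * torn down.
 */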