/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
#include "qedr_iw_cm.h"

#define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ	(4)
#define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

enum {
	QEDR_USER_MMAP_IO_WC = 0,
	QEDR_USER_MMAP_PHYS_PAGE,
};

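/* Copy a response to user space, clamping the copy length to what the
 * consumer's output buffer (udata->outlen) can actually hold.
 */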
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

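/* RoCE exposes a single default P_Key; any index within the table length
 * reports that default value.
 */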
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

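/* For iWARP the source GID is derived from the netdev MAC address. */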
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
	struct qedr_device_attr *qattr = &dev->attr;
	struct qedr_srq *srq = get_qedr_srq(ibsrq);

	srq_attr->srq_limit = srq->srq_limit;
	srq_attr->max_wr = qattr->max_srq_wr;
	srq_attr->max_sge = qattr->max_sge;

	return 0;
}

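/* Report device capabilities to the IB core from the attributes cached in
 * the qed RDMA context (dev->attr).
 */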
int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		attr->device_cap_flags |= IB_DEVICE_XRC;
	attr->max_send_sge = qattr->max_sge;
	attr->max_recv_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = qattr->max_pkey;
	attr->max_ah = qattr->max_ah;

	return 0;
}

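/* Translate an Ethernet link speed (in Mbps) into the closest IB
 * speed/width pair; unknown speeds fall back to SDR x1.
 */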
static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = IB_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = IB_SPEED_HDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 100000:
		*ib_speed = IB_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

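/* Query port state from the qed layer and fill in the RoCE/iWARP specific
 * attributes (MTU, GID and P_Key table sizes, link speed and width).
 */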
int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr is zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->ip_gids = true;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
		attr->gid_tbl_len = 1;
	} else {
		attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

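/* Allocate a user context: reserve a DPI (doorbell page) for the
 * application, expose it through an mmap entry and return the DPM
 * capabilities and queue limits to the user library.
 */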
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	int rc;
	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
	struct qedr_alloc_ucontext_resp uresp = {};
	struct qedr_alloc_ucontext_req ureq = {};
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;
	struct qedr_user_mmap_entry *entry;

	if (!udata)
		return -EFAULT;

	if (udata->inlen) {
		rc = ib_copy_from_udata(&ureq, udata,
					min(sizeof(ureq), udata->inlen));
		if (rc) {
			DP_ERR(dev, "Problem copying data from user space\n");
			return -EFAULT;
		}
		ctx->edpm_mode = !!(ureq.context_flags &
				    QEDR_ALLOC_UCTX_EDPM_MODE);
		ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
	}

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
		       rc);
		return rc;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		rc = -ENOMEM;
		goto err;
	}

	entry->io_address = ctx->dpi_phys_addr;
	entry->length = ctx->dpi_size;
	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
	entry->dpi = ctx->dpi;
	entry->dev = dev;
	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
					 ctx->dpi_size);
	if (rc) {
		kfree(entry);
		goto err;
	}
	ctx->db_mmap_entry = &entry->rdma_entry;

	if (!dev->user_dpm_enabled)
		uresp.dpm_flags = 0;
	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
	else
		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
				  QEDR_DPM_TYPE_ROCE_LEGACY |
				  QEDR_DPM_TYPE_ROCE_EDPM_MODE;

	if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
		uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
		uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
		uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
		uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
	}

	uresp.wids_enabled = 1;
	uresp.wid_count = oparams.wid_count;
	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return 0;

err:
	if (!ctx->db_mmap_entry)
		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
	else
		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);

	return rc;
}

void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);

	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}

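/* Release the resources behind an mmap entry once it is freed: the
 * doorbell-recovery page or the DPI that backed the mapping.
 */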
void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
	struct qedr_dev *dev = entry->dev;

	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
		free_page((unsigned long)entry->address);
	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);

	kfree(entry);
}

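/* Map either a write-combined doorbell BAR region or a kernel page
 * (doorbell recovery) into the user's address space, based on the mmap
 * entry looked up from the page offset.
 */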
int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
	struct ib_device *dev = ucontext->device;
	size_t length = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct qedr_user_mmap_entry *entry;
	int rc = 0;
	u64 pfn;

	ibdev_dbg(dev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}
	entry = get_qedr_mmap_entry(rdma_entry);
	ibdev_dbg(dev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->io_address, length, entry->mmap_flag);

	switch (entry->mmap_flag) {
	case QEDR_USER_MMAP_IO_WC:
		pfn = entry->io_address >> PAGE_SHIFT;
		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
				       pgprot_writecombine(vma->vm_page_prot),
				       rdma_entry);
		break;
	case QEDR_USER_MMAP_PHYS_PAGE:
		rc = vm_insert_page(vma, vma->vm_start,
				    virt_to_page(entry->address));
		break;
	default:
		rc = -EINVAL;
	}

	if (rc)
		ibdev_dbg(dev,
			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			  entry->io_address, length, entry->mmap_flag, rc);

	rdma_user_mmap_entry_put(rdma_entry);
	return rc;
}

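/* Allocate a protection domain ID from the qed layer and, for user space
 * consumers, return it through udata.
 */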
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 udata ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return -EINVAL;
	}

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		return rc;

	pd->pd_id = pd_id;

	if (udata) {
		struct qedr_alloc_pd_uresp uresp = {
			.pd_id = pd_id,
		};
		struct qedr_ucontext *context = rdma_udata_to_drv_context(
			udata, struct qedr_ucontext, ibucontext);

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			return rc;
		}

		pd->uctx = context;
		pd->uctx->pd = pd;
	}

	return 0;
}

int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
	return 0;
}

int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
	struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);

	return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
}

int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
	u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;

	dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE	NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER	(MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

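/* Allocate the PBL table: one DMA-coherent buffer per PBL. For two-layer
 * tables the first PBL is filled with the physical addresses of the
 * remaining ones.
 */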
static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
					flags);
		if (!va)
			goto err;

		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-layer PBLs: if we have more than one PBL, initialize the first
	 * one with physical pointers to all of the rest.
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

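/* Choose a PBL layout (one or two layers) and page size large enough to
 * hold num_pbes page-buffer entries.
 */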
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for layer 0 (points to the other PBLs) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

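/* Walk the umem in pg_shift sized blocks and write each block's DMA address
 * into the PBL pages as little-endian register pairs.
 */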
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct ib_block_iter biter;
	struct regpair *pbe;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two-layered PBL, the first PBL points to the rest of
	 * the PBLs and the first entry lies on the second PBL in the table.
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
		u64 pg_addr = rdma_block_iter_dma_address(&biter);

		pbe->lo = cpu_to_le32(pg_addr);
		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

		pbe_cnt++;
		total_num_pbes++;
		pbe++;

		if (total_num_pbes == pbl_info->num_pbes)
			return;

		/* If the given PBL is full of PBEs, move to the next PBL. */
		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct regpair *)pbl_tbl->va;
			pbe_cnt = 0;
		}
	}
}

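/* Register a doorbell address/data pair with the doorbell recovery
 * mechanism; a NULL db_data means the user library predates doorbell
 * recovery, so registration is skipped.
 */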
static int qedr_db_recovery_add(struct qedr_dev *dev,
				void __iomem *db_addr,
				void *db_data,
				enum qed_db_rec_width db_width,
				enum qed_db_rec_space db_space)
{
	if (!db_data) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
		return 0;
	}

	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
						 db_width, db_space);
}

static void qedr_db_recovery_del(struct qedr_dev *dev,
				 void __iomem *db_addr,
				 void *db_data)
{
	if (!db_data) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
		return;
	}

	/* Ignore return code as there is not much we can do about it. Error
	 * log will be printed inside.
	 */
	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata,
			      u32 db_offset)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = db_offset;
	uresp.icid = cq->icid;
	if (cq->q.db_mmap_entry)
		uresp.db_rec_addr =
			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

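/* Round the requested number of CQ entries up so the CQ buffer is page
 * aligned, accounting for the extra entry not reported to the FW.
 */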
static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

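/* Allocate and mmap-register a zeroed page used by the user library to
 * store doorbell data for recovery, when both sides support it.
 */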
static int qedr_init_user_db_rec(struct ib_udata *udata,
				 struct qedr_dev *dev, struct qedr_userq *q,
				 bool requires_db_rec)
{
	struct qedr_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
					  ibucontext);
	struct qedr_user_mmap_entry *entry;
	int rc;

	/* Abort for a user queue without a doorbell (SRQ) or a lib that
	 * doesn't support doorbell recovery.
	 */
	if (requires_db_rec == 0 || !uctx->db_rec)
		return 0;

	/* Allocate a page for doorbell recovery, add to mmap */
	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
	if (!q->db_rec_data) {
		DP_ERR(dev, "get_zeroed_page failed\n");
		return -ENOMEM;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto err_free_db_data;

	entry->address = q->db_rec_data;
	entry->length = PAGE_SIZE;
	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
					 &entry->rdma_entry,
					 PAGE_SIZE);
	if (rc)
		goto err_free_entry;

	q->db_mmap_entry = &entry->rdma_entry;

	return 0;

err_free_entry:
	kfree(entry);

err_free_db_data:
	free_page((unsigned long)q->db_rec_data);
	q->db_rec_data = NULL;
	return -ENOMEM;
}

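/* Pin a user-space queue buffer, build its PBL description and, for queues
 * with doorbells, set up the doorbell recovery page.
 */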
static inline int qedr_init_user_queue(struct ib_udata *udata,
				       struct qedr_dev *dev,
				       struct qedr_userq *q, u64 buf_addr,
				       size_t buf_len, bool requires_db_rec,
				       int access,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	/* mmap the user address used to store doorbell data for recovery */
	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static inline void qedr_init_cq_params(struct qedr_cq *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct qedr_ucontext *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct qedr_dev *dev, int vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int chain_entries, int page_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u64 pbl_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct qed_rdma_create_cq_in_params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) memset(params, 0, sizeof(*params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) params->cnq_id = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) params->cq_size = chain_entries - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) params->dpi = (ctx) ? ctx->dpi : dev->dpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) params->pbl_num_pages = page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) params->pbl_ptr = pbl_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) params->pbl_two_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
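/* Ring the CQ doorbell: latch the aggregation flags and the consumer
 * index in the doorbell shadow and push them to hardware in a single
 * 64-bit write.
 */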
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cq->db.data.agg_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) cq->db.data.value = cpu_to_le32(cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) writeq(cq->db.raw, cq->db_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
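/* Arm the CQ for the next completion notification. GSI CQs are handled
 * entirely by the driver and are never armed. The requested arm flags
 * (solicited and/or next completion) are latched under cq_lock and
 * written to the CQ doorbell together with the consumer index.
 */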
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct qedr_cq *cq = get_qedr_cq(ibcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) unsigned long sflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct qedr_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) dev = get_qedr_dev(ibcq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (cq->destroyed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) cq, cq->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (cq->cq_type == QEDR_CQ_TYPE_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) spin_lock_irqsave(&cq->cq_lock, sflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) cq->arm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (flags & IB_CQ_SOLICITED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (flags & IB_CQ_NEXT_COMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) spin_unlock_irqrestore(&cq->cq_lock, sflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
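/* Create a completion queue. For a user CQ the CQE buffer is supplied by
 * the user library and mapped via qedr_init_user_queue(); for a kernel CQ
 * a qed chain is allocated instead. In both cases the CQ is then created
 * in firmware and its doorbell is registered for doorbell recovery.
 */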
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct ib_device *ibdev = ibcq->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) udata, struct qedr_ucontext, ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct qed_rdma_destroy_cq_out_params destroy_oparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct qed_rdma_destroy_cq_in_params destroy_iparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct qed_chain_init_params chain_params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .mode = QED_CHAIN_MODE_PBL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) .intended_use = QED_CHAIN_USE_TO_CONSUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) .cnt_type = QED_CHAIN_CNT_TYPE_U32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .elem_size = sizeof(union rdma_cqe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct qedr_dev *dev = get_qedr_dev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct qed_rdma_create_cq_in_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct qedr_create_cq_ureq ureq = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int vector = attr->comp_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int entries = attr->cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct qedr_cq *cq = get_qedr_cq(ibcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int chain_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u32 db_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) u64 pbl_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u16 icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) DP_DEBUG(dev, QEDR_MSG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) "create_cq: called from %s. entries=%d, vector=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) udata ? "User Lib" : "Kernel", entries, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (entries > QEDR_MAX_CQES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) entries, QEDR_MAX_CQES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) chain_entries = qedr_align_cq_entries(entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) chain_params.num_elems = chain_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* calc db offset. user will add DPI base, kernel will add db addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) udata->inlen))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) "create cq: problem copying data from user space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!ureq.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) "create cq: cannot create a cq with 0 entries\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) cq->cq_type = QEDR_CQ_TYPE_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ureq.len, true, IB_ACCESS_LOCAL_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) pbl_ptr = cq->q.pbl_tbl->pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) page_cnt = cq->q.pbl_info.num_pbes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) cq->ibcq.cqe = chain_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) cq->q.db_addr = ctx->dpi_addr + db_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) cq->cq_type = QEDR_CQ_TYPE_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) &chain_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) page_cnt = qed_chain_get_page_cnt(&cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) cq->ibcq.cqe = cq->pbl.capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) pbl_ptr, ¶ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) rc = dev->ops->rdma_create_cq(dev->rdma_ctx, ¶ms, &icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) cq->icid = icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) cq->sig = QEDR_CQ_MAGIC_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) spin_lock_init(&cq->cq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) rc = qedr_db_recovery_add(dev, cq->q.db_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) &cq->q.db_rec_data->db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) DB_REC_WIDTH_64B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) DB_REC_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* Generate doorbell address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) cq->db.data.icid = cq->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) cq->db_addr = dev->db_addr + db_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) cq->db.data.params = DB_AGG_CMD_MAX <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* point to the very last element, passing it we will toggle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) cq->latest_cqe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) consume_cqe(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) DB_REC_WIDTH_64B, DB_REC_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) DP_DEBUG(dev, QEDR_MSG_CQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) cq->icid, cq, params.cq_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) destroy_iparams.icid = cq->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) &destroy_oparams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ib_umem_release(cq->q.umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (cq->q.db_mmap_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dev->ops->common->chain_free(dev->cdev, &cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct qedr_dev *dev = get_qedr_dev(ibcq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct qedr_cq *cq = get_qedr_cq(ibcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
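/* Bounded wait used by qedr_destroy_cq(): spin (udelay) and then sleep
 * (msleep) up to QEDR_DESTROY_CQ_MAX_ITERATIONS times each, with
 * QEDR_DESTROY_CQ_ITER_DURATION per iteration (usecs while spinning,
 * msecs while sleeping), for outstanding CNQ notifications.
 */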
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) #define QEDR_DESTROY_CQ_ITER_DURATION (10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct qedr_dev *dev = get_qedr_dev(ibcq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct qed_rdma_destroy_cq_out_params oparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct qed_rdma_destroy_cq_in_params iparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct qedr_cq *cq = get_qedr_cq(ibcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) cq->destroyed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /* GSIs CQs are handled by driver, so they don't exist in the FW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) iparams.icid = cq->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dev->ops->common->chain_free(dev->cdev, &cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ib_umem_release(cq->q.umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (cq->q.db_rec_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) qedr_db_recovery_del(dev, cq->q.db_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) &cq->q.db_rec_data->db_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* We don't want the IRQ handler to handle a non-existing CQ so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * wait until all CNQ interrupts, if any, are received. This will always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * happen and will always happen very fast. If not, then a serious error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * has occured. That is why we can use a long delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * We spin for a short time so we don’t lose time on context switching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * in case all the completions are handled in that span. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * we sleep for a while and check again. Since the CNQ may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * associated with (only) the current CPU we use msleep to allow the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * current CPU to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * The CNQ notification is increased in qedr_irq_handler().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) udelay(QEDR_DESTROY_CQ_ITER_DURATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) iter--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) msleep(QEDR_DESTROY_CQ_ITER_DURATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) iter--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* Note that we don't need to have explicit code to wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * completion of the event handler because it is invoked from the EQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * Since the destroy CQ ramrod has also been received on the EQ we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * be certain that there's no event handler in process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
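/* Translate the AH attribute's SGID entry into firmware modify-QP
 * parameters: select the RoCE mode (v1, v2/IPv4 or v2/IPv6), fill in the
 * source and destination GIDs (or IPv4 addresses), convert the GID dwords
 * to host byte order and read the VLAN id from the GID's L2 fields.
 */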
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static inline int get_gid_info_from_table(struct ib_qp *ibqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct qed_rdma_modify_qp_in_params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) *qp_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) const struct ib_gid_attr *gid_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) enum rdma_network_type nw_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) u32 ipv4_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) gid_attr = grh->sgid_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) nw_type = rdma_gid_attr_network_type(gid_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) switch (nw_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) case RDMA_NETWORK_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) sizeof(qp_params->sgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) memcpy(&qp_params->dgid.bytes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) &grh->dgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) sizeof(qp_params->dgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) qp_params->roce_mode = ROCE_V2_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) SET_FIELD(qp_params->modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) case RDMA_NETWORK_ROCE_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) sizeof(qp_params->sgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) memcpy(&qp_params->dgid.bytes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) &grh->dgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) sizeof(qp_params->dgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) qp_params->roce_mode = ROCE_V1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) case RDMA_NETWORK_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) qp_params->sgid.ipv4_addr = ipv4_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ipv4_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) qedr_get_ipv4_from_gid(grh->dgid.raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) qp_params->dgid.ipv4_addr = ipv4_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) SET_FIELD(qp_params->modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) qp_params->roce_mode = ROCE_V2_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (qp_params->vlan_id >= VLAN_CFI_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) qp_params->vlan_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct ib_qp_init_attr *attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct qedr_device_attr *qattr = &dev->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* QP0... attrs->qp_type == IB_QPT_GSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (attrs->qp_type != IB_QPT_RC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) attrs->qp_type != IB_QPT_GSI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) attrs->qp_type != IB_QPT_XRC_INI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) attrs->qp_type != IB_QPT_XRC_TGT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) "create qp: unsupported qp type=0x%x requested\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) attrs->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (attrs->cap.max_send_wr > qattr->max_sqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) attrs->cap.max_send_wr, qattr->max_sqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (attrs->cap.max_inline_data > qattr->max_inline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) attrs->cap.max_inline_data, qattr->max_inline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (attrs->cap.max_send_sge > qattr->max_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) attrs->cap.max_send_sge, qattr->max_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (attrs->cap.max_recv_sge > qattr->max_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) attrs->cap.max_recv_sge, qattr->max_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /* verify consumer QPs are not trying to use GSI QP's CQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * TGT QP isn't associated with RQ/SQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) (attrs->qp_type != IB_QPT_XRC_TGT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) (attrs->qp_type != IB_QPT_XRC_INI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) "create qp: consumer QP cannot use GSI CQs.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static int qedr_copy_srq_uresp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct qedr_srq *srq, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct qedr_create_srq_uresp uresp = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) uresp.srq_id = srq->srq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) DP_ERR(dev, "create srq: problem copying data to user space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static void qedr_copy_rq_uresp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct qedr_create_qp_uresp *uresp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* iWARP requires two doorbells per RQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) uresp->rq_db_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) uresp->rq_db_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) uresp->rq_icid = qp->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (qp->urq.db_mmap_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) uresp->rq_db_rec_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static void qedr_copy_sq_uresp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct qedr_create_qp_uresp *uresp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* iWARP uses the same cid for rq and sq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) uresp->sq_icid = qp->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) uresp->sq_icid = qp->icid + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (qp->usq.db_mmap_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) uresp->sq_db_rec_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int qedr_copy_qp_uresp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct qedr_qp *qp, struct ib_udata *udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct qedr_create_qp_uresp *uresp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) memset(uresp, 0, sizeof(*uresp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (qedr_qp_has_sq(qp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) qedr_copy_sq_uresp(dev, uresp, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (qedr_qp_has_rq(qp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) qedr_copy_rq_uresp(dev, uresp, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) uresp->qp_id = qp->qp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) "create qp: failed a copy to user space with qp icid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static void qedr_set_common_qp_params(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct qedr_pd *pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct ib_qp_init_attr *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) spin_lock_init(&qp->q_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) kref_init(&qp->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) init_completion(&qp->iwarp_cm_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) qp->pd = pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) qp->qp_type = attrs->qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) qp->max_inline_data = attrs->cap.max_inline_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) qp->state = QED_ROCE_QP_STATE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) qp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (qedr_qp_has_sq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) qp->sq.max_sges = attrs->cap.max_send_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) qp->sq_cq = get_qedr_cq(attrs->send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) qp->sq.max_sges, qp->sq_cq->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (attrs->srq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) qp->srq = get_qedr_srq(attrs->srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (qedr_qp_has_rq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) qp->rq_cq = get_qedr_cq(attrs->recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) qp->rq.max_sges = attrs->cap.max_recv_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) qp->rq.max_sges, qp->rq_cq->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) pd->pd_id, qp->qp_type, qp->max_inline_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) qp->sq.max_sges, qp->sq_cq->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
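/* Set up kernel-space RoCE doorbells: the SQ doorbell uses icid + 1 and
 * the RQ doorbell uses icid. Both are registered for doorbell recovery;
 * if registering the RQ doorbell fails, the SQ entry is removed again.
 */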
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (qedr_qp_has_sq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) qp->sq.db = dev->db_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) qp->sq.db_data.data.icid = qp->icid + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) DB_REC_WIDTH_32B, DB_REC_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (qedr_qp_has_rq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) qp->rq.db = dev->db_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) qp->rq.db_data.data.icid = qp->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) DB_REC_WIDTH_32B, DB_REC_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (rc && qedr_qp_has_sq(qp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static int qedr_check_srq_params(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct ib_srq_init_attr *attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct qedr_device_attr *qattr = &dev->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (attrs->attr.max_wr > qattr->max_srq_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) attrs->attr.max_wr, qattr->max_srq_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
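	/* An oversized max_sge is only reported here; the request is not
	 * rejected.
	 */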
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (attrs->attr.max_sge > qattr->max_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) attrs->attr.max_sge, qattr->max_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!udata && attrs->srq_type == IB_SRQT_XRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static void qedr_free_srq_user_params(struct qedr_srq *srq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ib_umem_release(srq->usrq.umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ib_umem_release(srq->prod_umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct qedr_dev *dev = srq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) hw_srq->virt_prod_pair_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) hw_srq->phy_prod_pair_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
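/* Map a user-space SRQ: pin the WQE buffer and build its PBL via
 * qedr_init_user_queue(), then pin the separate producer-pair buffer and
 * record its DMA address for the firmware.
 */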
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static int qedr_init_srq_user_params(struct ib_udata *udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct qedr_srq *srq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct qedr_create_srq_ureq *ureq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) int access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) ureq->srq_len, false, access, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) sizeof(struct rdma_srq_producers), access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (IS_ERR(srq->prod_umem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ib_umem_release(srq->usrq.umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) DP_ERR(srq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) "create srq: failed ib_umem_get for producer, got %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) PTR_ERR(srq->prod_umem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return PTR_ERR(srq->prod_umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) sg = srq->prod_umem->sg_head.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
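/* Allocate kernel-space SRQ resources: a coherent DMA buffer for the
 * producer pair and a PBL-mode chain with max_wr * RDMA_MAX_SRQ_WQE_SIZE
 * elements of QEDR_SRQ_WQE_ELEM_SIZE bytes each.
 */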
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct ib_srq_init_attr *init_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct qed_chain_init_params params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .mode = QED_CHAIN_MODE_PBL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) .cnt_type = QED_CHAIN_CNT_TYPE_U32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) dma_addr_t phy_prod_pair_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) u32 num_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) void *va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) va = dma_alloc_coherent(&dev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) sizeof(struct rdma_srq_producers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) &phy_prod_pair_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (!va) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) "create srq: failed to allocate dma memory for producer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) hw_srq->virt_prod_pair_addr = va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) params.num_elems = num_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, ¶ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) hw_srq->num_elems = num_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) va, phy_prod_pair_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
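/* Create a shared receive queue. User SRQs get their buffers from the
 * user library (qedr_init_srq_user_params()); kernel SRQs allocate a qed
 * chain and a producer pair (qedr_alloc_srq_kernel_params()). The SRQ is
 * then created in firmware, the response is copied to user space when
 * applicable, and the SRQ is inserted into the dev->srqs xarray so it can
 * later be looked up by srq_id.
 */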
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct qed_rdma_destroy_srq_in_params destroy_in_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct qed_rdma_create_srq_in_params in_params = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct qed_rdma_create_srq_out_params out_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct qedr_create_srq_ureq ureq = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) u64 pbl_base_addr, phy_prod_pair_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct qedr_srq_hwq_info *hw_srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) u32 page_cnt, page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct qedr_srq *srq = get_qedr_srq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) "create SRQ called from %s (pd %p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) (udata) ? "User lib" : "kernel", pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) rc = qedr_check_srq_params(dev, init_attr, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) srq->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) hw_srq = &srq->hw_srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) spin_lock_init(&srq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) hw_srq->max_wr = init_attr->attr.max_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) hw_srq->max_sges = init_attr->attr.max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) udata->inlen))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) "create srq: problem copying data from user space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) page_cnt = srq->usrq.pbl_info.num_pbes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) pbl_base_addr = srq->usrq.pbl_tbl->pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) page_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct qed_chain *pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) pbl = &hw_srq->pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) page_cnt = qed_chain_get_page_cnt(pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) pbl_base_addr = qed_chain_get_pbl_phys(pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) page_size = QED_CHAIN_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
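/* Fill the common FW create-SRQ parameters; the PBL base address, page
 * count and page size come from whichever path (user or kernel) ran above.
 */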
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) in_params.pd_id = pd->pd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) in_params.pbl_base_addr = pbl_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) in_params.prod_pair_addr = phy_prod_pair_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) in_params.num_pages = page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) in_params.page_size = page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (srq->is_xrc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) in_params.is_xrc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) in_params.xrcd_id = xrcd->xrcd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) in_params.cq_cid = cq->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) srq->srq_id = out_params.srq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) rc = qedr_copy_srq_uresp(dev, srq, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) DP_DEBUG(dev, QEDR_MSG_SRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) destroy_in_params.srq_id = srq->srq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) qedr_free_srq_user_params(srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) qedr_free_srq_kernel_params(srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct qed_rdma_destroy_srq_in_params in_params = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct qedr_srq *srq = get_qedr_srq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
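/* Unregister the SRQ id, destroy the FW SRQ object and then release the
 * user- or kernel-mode queue resources.
 */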
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) xa_erase_irq(&dev->srqs, srq->srq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) in_params.srq_id = srq->srq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) in_params.is_xrc = srq->is_xrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (ibsrq->uobject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) qedr_free_srq_user_params(srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) qedr_free_srq_kernel_params(srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) DP_DEBUG(dev, QEDR_MSG_SRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) "destroy srq: destroyed srq with srq_id=0x%0x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) srq->srq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct qed_rdma_modify_srq_in_params in_params = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct qedr_srq *srq = get_qedr_srq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
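/* SRQ resize (IB_SRQ_MAX_WR) is not supported; only the SRQ limit (the
 * low-watermark that arms the IB_EVENT_SRQ_LIMIT_REACHED event) can change.
 */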
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (attr_mask & IB_SRQ_MAX_WR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) "modify srq: invalid attribute mask=0x%x specified for %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) attr_mask, srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (attr_mask & IB_SRQ_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (attr->srq_limit >= srq->hw_srq.max_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) attr->srq_limit, srq->hw_srq.max_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) in_params.srq_id = srq->srq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) in_params.wqe_limit = attr->srq_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) srq->srq_limit = attr->srq_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) DP_DEBUG(dev, QEDR_MSG_SRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) switch (ib_qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return QED_RDMA_QP_TYPE_RC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case IB_QPT_XRC_INI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return QED_RDMA_QP_TYPE_XRC_INI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case IB_QPT_XRC_TGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return QED_RDMA_QP_TYPE_XRC_TGT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return QED_RDMA_QP_TYPE_INVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) qedr_init_common_qp_in_params(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct qedr_pd *pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct ib_qp_init_attr *attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) bool fmr_and_reserved_lkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) struct qed_rdma_create_qp_in_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /* QP handle to be written in an async event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) params->stats_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (pd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) params->pd = pd->pd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
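/* Only fill in CQ and SRQ ids for the queues this QP actually has. */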
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (qedr_qp_has_sq(qp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (qedr_qp_has_rq(qp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (qedr_qp_has_srq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) params->srq_id = qp->srq->srq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) params->use_srq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) params->srq_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) params->use_srq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) "qp=%p. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) "sq_addr=0x%llx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) "sq_len=%zd, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) "rq_addr=0x%llx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) "rq_len=%zd"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct qed_rdma_create_qp_out_params *out_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) {
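/* For iWARP the qed core allocates the PBL memory during rdma_create_qp();
 * point the driver's PBL tables at it and fill them from the user-mapped
 * queue buffers.
 */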
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) &qp->usq.pbl_info, FW_PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (!qp->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) &qp->urq.pbl_info, FW_PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static void qedr_cleanup_user(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct qedr_ucontext *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (qedr_qp_has_sq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ib_umem_release(qp->usq.umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) qp->usq.umem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (qedr_qp_has_rq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) ib_umem_release(qp->urq.umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) qp->urq.umem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
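/* RoCE PBLs were allocated by the driver and are freed in full. For iWARP
 * the PBL pages came from the qed core (see qedr_iwarp_populate_user_qp()),
 * so only the driver-allocated table structures are freed.
 */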
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (rdma_protocol_roce(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) kfree(qp->usq.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) kfree(qp->urq.pbl_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (qp->usq.db_rec_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) qedr_db_recovery_del(dev, qp->usq.db_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) &qp->usq.db_rec_data->db_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (qp->urq.db_rec_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) qedr_db_recovery_del(dev, qp->urq.db_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) &qp->urq.db_rec_data->db_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) &qp->urq.db_rec_db2_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) static int qedr_create_user_qp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) struct ib_pd *ibpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct ib_udata *udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) struct ib_qp_init_attr *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct qed_rdma_create_qp_in_params in_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct qed_rdma_create_qp_out_params out_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct qedr_create_qp_uresp uresp = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct qedr_create_qp_ureq ureq = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct qedr_ucontext *ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct qedr_pd *pd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) qp->create_type = QEDR_QP_CREATE_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (ibpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) pd = get_qedr_pd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ctx = pd->uctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) udata->inlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) DP_ERR(dev, "Problem copying data from user space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (qedr_qp_has_sq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* SQ - read access only (0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) ureq.sq_len, true, 0, alloc_and_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (qedr_qp_has_rq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /* RQ - read access only (0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) ureq.rq_len, true, 0, alloc_and_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) memset(&in_params, 0, sizeof(in_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) in_params.qp_handle_lo = ureq.qp_handle_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) in_params.qp_handle_hi = ureq.qp_handle_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (qp->qp_type == IB_QPT_XRC_TGT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) in_params.xrcd_id = xrcd->xrcd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) in_params.qp_handle_lo = qp->qp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) in_params.use_srq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (qedr_qp_has_sq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (qedr_qp_has_rq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) &in_params, &out_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (!qp->qed_qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) qedr_iwarp_populate_user_qp(dev, qp, &out_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) qp->qp_id = out_params.qp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) qp->icid = out_params.icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* db offset was calculated in copy_qp_uresp, now set in the user q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (qedr_qp_has_sq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) qp->sq.max_wr = attrs->cap.max_send_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) &qp->usq.db_rec_data->db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) DB_REC_WIDTH_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) DB_REC_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (qedr_qp_has_rq(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) qp->rq.max_wr = attrs->cap.max_recv_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) &qp->urq.db_rec_data->db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) DB_REC_WIDTH_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) DB_REC_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /* calculate the db_rec_db2 data since it is constant, so there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * no need to reflect it from user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) qp->urq.db_rec_db2_data.data.value =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) &qp->urq.db_rec_db2_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) DB_REC_WIDTH_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) DB_REC_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) qedr_qp_user_print(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) qedr_cleanup_user(dev, ctx, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) qp->sq.db = dev->db_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) qp->sq.db_data.data.icid = qp->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) rc = qedr_db_recovery_add(dev, qp->sq.db,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) &qp->sq.db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) DB_REC_WIDTH_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) DB_REC_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) qp->rq.db = dev->db_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) qp->rq.db_data.data.icid = qp->icid;
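/* iWARP RQs use a second doorbell (TCM flags) in addition to the producer
 * doorbell; both are registered with doorbell recovery below.
 */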
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) qp->rq.iwarp_db2 = dev->db_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) qp->rq.iwarp_db2_data.data.icid = qp->icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) rc = qedr_db_recovery_add(dev, qp->rq.db,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) &qp->rq.db_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) DB_REC_WIDTH_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) DB_REC_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) &qp->rq.iwarp_db2_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) DB_REC_WIDTH_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) DB_REC_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) qedr_roce_create_kernel_qp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) struct qed_rdma_create_qp_in_params *in_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) u32 n_sq_elems, u32 n_rq_elems)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct qed_rdma_create_qp_out_params out_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct qed_chain_init_params params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) .mode = QED_CHAIN_MODE_PBL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) .cnt_type = QED_CHAIN_CNT_TYPE_U32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
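/* For RoCE the driver allocates the SQ and RQ chains first and passes
 * their PBL layout to the FW create-QP call.
 */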
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) params.num_elems = n_sq_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) params.elem_size = QEDR_SQE_ELEMENT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) params.num_elems = n_rq_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) params.elem_size = QEDR_RQE_ELEMENT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) in_params, &out_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (!qp->qed_qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) qp->qp_id = out_params.qp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) qp->icid = out_params.icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return qedr_set_roce_db_info(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct qed_rdma_create_qp_in_params *in_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) u32 n_sq_elems, u32 n_rq_elems)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct qed_rdma_create_qp_out_params out_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) struct qed_chain_init_params params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) .mode = QED_CHAIN_MODE_PBL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) .cnt_type = QED_CHAIN_CNT_TYPE_U32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
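/* iWARP works the other way around: report the expected number of PBL
 * pages, let the FW create the QP, and then build the chains on top of
 * the external PBLs returned in out_params.
 */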
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) QEDR_SQE_ELEMENT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) QED_CHAIN_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) QED_CHAIN_MODE_PBL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) QEDR_RQE_ELEMENT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) QED_CHAIN_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) QED_CHAIN_MODE_PBL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) in_params, &out_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!qp->qed_qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /* Now we allocate the chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) params.num_elems = n_sq_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) params.elem_size = QEDR_SQE_ELEMENT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) params.ext_pbl_virt = out_params.sq_pbl_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) params.ext_pbl_phys = out_params.sq_pbl_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) params.num_elems = n_rq_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) params.elem_size = QEDR_RQE_ELEMENT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) params.ext_pbl_virt = out_params.rq_pbl_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) params.ext_pbl_phys = out_params.rq_pbl_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) qp->qp_id = out_params.qp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) qp->icid = out_params.icid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) return qedr_set_iwarp_db_info(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) kfree(qp->wqe_wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) kfree(qp->rqe_wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /* The GSI QP is not registered with the db recovery mechanism, so there is nothing to delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (qp->qp_type == IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!qp->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) &qp->rq.iwarp_db2_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static int qedr_create_kernel_qp(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct ib_pd *ibpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) struct ib_qp_init_attr *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct qed_rdma_create_qp_in_params in_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct qedr_pd *pd = get_qedr_pd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) int rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) u32 n_rq_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) u32 n_sq_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) u32 n_sq_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) memset(&in_params, 0, sizeof(in_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) qp->create_type = QEDR_QP_CREATE_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * the ring. The ring should allow at least a single WR, even if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * user requested none, due to allocation issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * We should add an extra WR since the prod and cons indices of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * wqe_wr_id are managed in such a way that the WQ is considered full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * when (prod+1)%max_wr==cons. We currently don't do that because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * double the number of entries due to an iSER issue that pushes far more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * WRs than indicated. If we decline its ib_post_send() then we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * error prints in the dmesg we'd like to avoid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) dev->attr.max_sqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (!qp->wqe_wr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) /* QP handle to be written in CQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) * the ring. The ring should allow at least a single WR, even if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) * user requested none, due to allocation issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /* Allocate driver internal RQ array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (!qp->rqe_wr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) "create qp: failed RQ shadow memory allocation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) kfree(qp->wqe_wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
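/* Clamp the requested SQ size to the device limit and convert WRs to
 * ring elements.
 */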
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) n_sq_entries = attrs->cap.max_send_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) n_sq_entries = max_t(u32, n_sq_entries, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) n_sq_elems, n_rq_elems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) n_sq_elems, n_rq_elems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) qedr_cleanup_kernel(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) struct qedr_ucontext *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) rdma_udata_to_drv_context(udata, struct qedr_ucontext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
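/* Only non-GSI QPs have a FW QP object to destroy. */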
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (qp->qp_type != IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (qp->create_type == QEDR_QP_CREATE_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) qedr_cleanup_user(dev, ctx, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) qedr_cleanup_kernel(dev, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct ib_qp_init_attr *attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) struct qedr_xrcd *xrcd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) struct qedr_pd *pd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) struct qedr_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) struct qedr_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) struct ib_qp *ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (attrs->qp_type == IB_QPT_XRC_TGT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) xrcd = get_qedr_xrcd(attrs->xrcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) dev = get_qedr_dev(xrcd->ibxrcd.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) pd = get_qedr_pd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) dev = get_qedr_dev(ibpd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) udata ? "user library" : "kernel", pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) udata ? "user library" : "kernel", attrs->event_handler, pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) get_qedr_cq(attrs->send_cq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) get_qedr_cq(attrs->send_cq)->icid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) get_qedr_cq(attrs->recv_cq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) qp = kzalloc(sizeof(*qp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (!qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) DP_ERR(dev, "create qp: failed allocating memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) qedr_set_common_qp_params(dev, qp, pd, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (attrs->qp_type == IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) ibqp = qedr_create_gsi_qp(dev, attrs, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (IS_ERR(ibqp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
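/* XRC target QPs take the user-QP path even when created from the kernel. */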
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (udata || xrcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) goto out_free_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) qp->ibqp.qp_num = qp->qp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) goto out_free_qp_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) out_free_qp_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) qedr_free_qp_resources(dev, qp, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) out_free_qp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) switch (qp_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) case QED_ROCE_QP_STATE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) case QED_ROCE_QP_STATE_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return IB_QPS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) case QED_ROCE_QP_STATE_RTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return IB_QPS_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) case QED_ROCE_QP_STATE_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return IB_QPS_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) case QED_ROCE_QP_STATE_SQD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return IB_QPS_SQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) case QED_ROCE_QP_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return IB_QPS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) case QED_ROCE_QP_STATE_SQE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) return IB_QPS_SQE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return IB_QPS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) static enum qed_roce_qp_state qedr_get_state_from_ibqp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) enum ib_qp_state qp_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) switch (qp_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) case IB_QPS_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return QED_ROCE_QP_STATE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) case IB_QPS_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return QED_ROCE_QP_STATE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) case IB_QPS_RTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return QED_ROCE_QP_STATE_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) case IB_QPS_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return QED_ROCE_QP_STATE_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) case IB_QPS_SQD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return QED_ROCE_QP_STATE_SQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) case IB_QPS_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) return QED_ROCE_QP_STATE_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) return QED_ROCE_QP_STATE_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) qed_chain_reset(&qph->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) qph->prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) qph->cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) qph->wqe_cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) qph->db_data.data.value = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) static int qedr_update_qp_state(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) enum qed_roce_qp_state cur_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) enum qed_roce_qp_state new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (new_state == cur_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) switch (cur_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) case QED_ROCE_QP_STATE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) case QED_ROCE_QP_STATE_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) qp->prev_wqe_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) qedr_reset_qp_hwq_info(&qp->sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) qedr_reset_qp_hwq_info(&qp->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) case QED_ROCE_QP_STATE_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) case QED_ROCE_QP_STATE_RTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Update doorbell (in case post_recv was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * done before move to RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (rdma_protocol_roce(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) writel(qp->rq.db_data.raw, qp->rq.db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) case QED_ROCE_QP_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /* Invalid state change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) case QED_ROCE_QP_STATE_RTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) /* RTR->XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) case QED_ROCE_QP_STATE_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) case QED_ROCE_QP_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) /* Invalid state change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) case QED_ROCE_QP_STATE_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /* RTS->XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) case QED_ROCE_QP_STATE_SQD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) case QED_ROCE_QP_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) /* Invalid state change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) case QED_ROCE_QP_STATE_SQD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /* SQD->XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) case QED_ROCE_QP_STATE_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) case QED_ROCE_QP_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) /* Invalid state change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) case QED_ROCE_QP_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /* ERR->XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) case QED_ROCE_QP_STATE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if ((qp->rq.prod != qp->rq.cons) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) (qp->sq.prod != qp->sq.cons)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) DP_NOTICE(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) qp->rq.prod, qp->rq.cons, qp->sq.prod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) qp->sq.cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
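/* Translate the ib_qp_attr/attr_mask pair into qed modify-QP parameters,
 * hand them to the qed core (for non-GSI QPs) and track the resulting QP
 * state in the driver.
 */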
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) int attr_mask, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) struct qedr_qp *qp = get_qedr_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) struct qed_rdma_modify_qp_in_params qp_params = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) enum ib_qp_state old_qp_state, new_qp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) enum qed_roce_qp_state cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) DP_DEBUG(dev, QEDR_MSG_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) attr->qp_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) old_qp_state = qedr_get_ibqp_state(qp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (attr_mask & IB_QP_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) new_qp_state = attr->qp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) new_qp_state = old_qp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (rdma_protocol_roce(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) ibqp->qp_type, attr_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) "modify qp: invalid attribute mask=0x%x specified for\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) attr_mask, qp->qp_id, ibqp->qp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) old_qp_state, new_qp_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Translate the masks... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (attr_mask & IB_QP_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) qp_params.sqd_async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (attr_mask & IB_QP_PKEY_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) if (attr_mask & IB_QP_QKEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) qp->qkey = attr->qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (attr_mask & IB_QP_ACCESS_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) qp_params.incoming_rdma_read_en = attr->qp_access_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) IB_ACCESS_REMOTE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) qp_params.incoming_rdma_write_en = attr->qp_access_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) IB_ACCESS_REMOTE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) qp_params.incoming_atomic_en = attr->qp_access_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) IB_ACCESS_REMOTE_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (attr_mask & IB_QP_PATH_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (attr->path_mtu < IB_MTU_256 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) attr->path_mtu > IB_MTU_4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) ib_mtu_enum_to_int(iboe_get_mtu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) (dev->ndev->mtu)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if (!qp->mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) qp->mtu =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) qp_params.traffic_class_tos = grh->traffic_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) qp_params.flow_label = grh->flow_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) qp_params.hop_limit_ttl = grh->hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) qp->sgid_idx = grh->sgid_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) "modify qp: problems with GID index %d (rc=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) grh->sgid_index, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) rc = qedr_get_dmac(dev, &attr->ah_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) qp_params.remote_mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) qp_params.use_local_mac = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) qp_params.remote_mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) qp_params.mtu = qp->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) qp_params.lb_indication = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) if (!qp_params.mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* Stay with current MTU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (qp->mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) qp_params.mtu = qp->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) qp_params.mtu =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (attr_mask & IB_QP_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) /* The received timeout value is an exponent used like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * "12.7.34 LOCAL ACK TIMEOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) * Value representing the transport (ACK) timeout for use by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) * the remote, expressed as: 4.096 * 2^timeout [usec]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) * The FW expects timeout in msec so we need to divide the usec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) * A timeout of zero means infinite and is handled below; otherwise we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) * use 'max_t' so that sub-1-msec results are configured as 1 msec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) */
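/* For example, attr->timeout = 14 means 4.096 * 2^14 usec ~= 67 msec;
 * the approximation above configures 2^(14 - 8) = 64 msec.
 */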
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (attr->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) qp_params.ack_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 1 << max_t(int, attr->timeout - 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) qp_params.ack_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (attr_mask & IB_QP_RETRY_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) qp_params.retry_cnt = attr->retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (attr_mask & IB_QP_RNR_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) qp_params.rnr_retry_cnt = attr->rnr_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (attr_mask & IB_QP_RQ_PSN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) qp_params.rq_psn = attr->rq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) qp->rq_psn = attr->rq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) "unsupported max_rd_atomic=%d, supported=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) attr->max_rd_atomic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) dev->attr.max_qp_req_rd_atomic_resc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) qp_params.max_rd_atomic_req = attr->max_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (attr_mask & IB_QP_MIN_RNR_TIMER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (attr_mask & IB_QP_SQ_PSN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) qp_params.sq_psn = attr->sq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) qp->sq_psn = attr->sq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (attr->max_dest_rd_atomic >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) dev->attr.max_qp_resp_rd_atomic_resc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) "unsupported max_dest_rd_atomic=%d, supported=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) attr->max_dest_rd_atomic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) dev->attr.max_qp_resp_rd_atomic_resc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) if (attr_mask & IB_QP_DEST_QPN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) SET_FIELD(qp_params.modify_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) qp_params.dest_qp = attr->dest_qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) qp->dest_qp_num = attr->dest_qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) cur_state = qp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) /* Update the QP state before the actual ramrod to prevent a race with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * the fast path. Modifying the QP state to error causes the device to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * flush the CQEs, and while polling, the flushed CQEs would be treated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * as a potential issue if the QP weren't already in the error state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) qp->state = QED_ROCE_QP_STATE_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (qp->qp_type != IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) qp->qed_qp, &qp_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) if (attr_mask & IB_QP_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) if ((qp->qp_type != IB_QPT_GSI) && (!udata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) rc = qedr_update_qp_state(dev, qp, cur_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) qp_params.new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) qp->state = qp_params.new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
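/* Convert the qed query-QP output into IB access flags. Local write access
 * is always reported.
 */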
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) int ib_qp_acc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) if (params->incoming_rdma_write_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (params->incoming_rdma_read_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (params->incoming_atomic_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return ib_qp_acc_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
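/* Query the QP from the qed core (GSI QPs are always reported as RTS) and
 * fill the IB attribute and init-attribute structures from the result.
 */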
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) int qedr_query_qp(struct ib_qp *ibqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) struct ib_qp_attr *qp_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) int attr_mask, struct ib_qp_init_attr *qp_init_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct qed_rdma_query_qp_out_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) struct qedr_qp *qp = get_qedr_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) struct qedr_dev *dev = qp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) memset(&params, 0, sizeof(params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) memset(qp_attr, 0, sizeof(*qp_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) memset(qp_init_attr, 0, sizeof(*qp_init_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (qp->qp_type != IB_QPT_GSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) qp_attr->qp_state = qedr_get_ibqp_state(params.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) qp_attr->path_mig_state = IB_MIG_MIGRATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) qp_attr->rq_psn = params.rq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) qp_attr->sq_psn = params.sq_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) qp_attr->dest_qp_num = params.dest_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) qp_attr->cap.max_send_wr = qp->sq.max_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) qp_attr->cap.max_recv_wr = qp->rq.max_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) qp_attr->cap.max_send_sge = qp->sq.max_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) qp_attr->cap.max_recv_sge = qp->rq.max_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) qp_attr->cap.max_inline_data = dev->attr.max_inline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) qp_init_attr->cap = qp_attr->cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) params.flow_label, qp->sgid_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) params.hop_limit_ttl, params.traffic_class_tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) rdma_ah_set_sl(&qp_attr->ah_attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) qp_attr->timeout = params.timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) qp_attr->rnr_retry = params.rnr_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) qp_attr->retry_cnt = params.retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) qp_attr->pkey_index = params.pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) qp_attr->port_num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) qp_attr->alt_pkey_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) qp_attr->alt_port_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) qp_attr->alt_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) qp_attr->max_rd_atomic = params.max_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) qp_attr->cap.max_inline_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
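/* Destroy a QP: move a connected RoCE QP to the error state first, or wait
 * for any in-flight iWARP connect/disconnect to finish, then release the
 * QP resources.
 */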
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) struct qedr_qp *qp = get_qedr_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) struct qedr_dev *dev = qp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) struct ib_qp_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) int attr_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) qp, qp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) if (rdma_protocol_roce(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) (qp->state != QED_ROCE_QP_STATE_ERR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) (qp->state != QED_ROCE_QP_STATE_INIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) attr.qp_state = IB_QPS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) attr_mask |= IB_QP_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) /* Change the QP state to ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) /* If connection establishment started, the WAIT_FOR_CONNECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) * bit will be on and we need to wait for the establishment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * to complete before destroying the qp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) &qp->iwarp_cm_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) wait_for_completion(&qp->iwarp_cm_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * bit will be on, and we need to wait for the disconnect to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) * complete before continuing. We can use the same completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) * iwarp_cm_comp, since this is the only place that waits for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * this completion and it is sequential. In addition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) * disconnect can't occur before the connection is fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) * established, therefore if WAIT_FOR_DISCONNECT is on it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) * means WAIT_FOR_CONNECT is also on and the completion for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) * CONNECT already occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) &qp->iwarp_cm_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) wait_for_completion(&qp->iwarp_cm_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) if (qp->qp_type == IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) qedr_destroy_gsi_qp(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) /* We need to remove the entry from the xarray before we release the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * qp_id to avoid a race where the qp_id is reallocated and a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) * subsequent xa_insert fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) xa_erase(&dev->qps, qp->qp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) qedr_free_qp_resources(dev, qp, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (rdma_protocol_iwarp(&dev->ibdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) qedr_iw_qp_rem_ref(&qp->ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
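/* Creating an address handle only caches the AH attributes in the qedr_ah;
 * no device command is issued here.
 */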
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) struct qedr_ah *ah = get_qedr_ah(ibah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) struct qedr_ah *ah = get_qedr_ah(ibah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) rdma_destroy_ah_attr(&ah->attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
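/* Return every PBL owned by this MR (both the in-use and free lists) to the
 * allocator.
 */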
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) struct qedr_pbl *pbl, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (info->pbl_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) list_add_tail(&info->pbl_table->list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) &info->free_pbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (!list_empty(&info->inuse_pbl_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) list_del(&pbl->list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) qedr_free_pbl(dev, &info->pbl_info, pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
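/* Set up the PBL bookkeeping for an MR: size the PBL table for
 * page_list_len pages and allocate the first PBL plus one spare on the
 * free list.
 */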
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) size_t page_list_len, bool two_layered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) struct qedr_pbl *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) INIT_LIST_HEAD(&info->free_pbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) INIT_LIST_HEAD(&info->inuse_pbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) page_list_len, two_layered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (IS_ERR(info->pbl_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) rc = PTR_ERR(info->pbl_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) &info->pbl_table->pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) /* In the usual case we use 2 PBLs, so we add one to the free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) * list and allocate another one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (IS_ERR(tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) list_add_tail(&tmp->list_entry, &info->free_pbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) free_mr_info(dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
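/* Register a user memory region: pin the user pages, build the PBLs
 * describing them and register the TID with the qed core.
 */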
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) u64 usr_addr, int acc, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) struct qedr_dev *dev = get_qedr_dev(ibpd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) struct qedr_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct qedr_pd *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) int rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) pd = get_qedr_pd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) DP_DEBUG(dev, QEDR_MSG_MR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) pd->pd_id, start, len, usr_addr, acc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) mr = kzalloc(sizeof(*mr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (!mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) mr->type = QEDR_MR_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) mr->umem = ib_umem_get(ibpd->device, start, len, acc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (IS_ERR(mr->umem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) rc = init_mr_info(dev, &mr->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) &mr->info.pbl_info, PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) /* Index only, 18 bit long, lkey = itid << 8 | key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) mr->hw_mr.key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) mr->hw_mr.pd = pd->pd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) mr->hw_mr.local_read = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) mr->hw_mr.mw_bind = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) mr->hw_mr.page_size_log = PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) mr->hw_mr.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) mr->hw_mr.vaddr = usr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) mr->hw_mr.phy_mr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) mr->hw_mr.dma_mr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) DP_ERR(dev, "roce register tid returned an error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) mr->hw_mr.remote_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) mr->ibmr.lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) return &mr->ibmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) kfree(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
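/* Deregister an MR: release its TID, free the PBL info of non-DMA MRs and
 * release any pinned user memory.
 */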
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) struct qedr_mr *mr = get_qedr_mr(ib_mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (mr->type != QEDR_MR_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) free_mr_info(dev, &mr->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) /* It may be user-registered memory; release the umem if so. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) ib_umem_release(mr->umem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) kfree(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
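/* Allocate a fast-register MR: set up its PBL info and register a
 * physical-MR TID whose page list is filled in later by qedr_map_mr_sg().
 */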
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) int max_page_list_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) struct qedr_pd *pd = get_qedr_pd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) struct qedr_dev *dev = get_qedr_dev(ibpd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct qedr_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) int rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) DP_DEBUG(dev, QEDR_MSG_MR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) max_page_list_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) mr = kzalloc(sizeof(*mr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if (!mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) mr->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) mr->type = QEDR_MR_FRMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) /* Index only, 18 bit long, lkey = itid << 8 | key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) mr->hw_mr.key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) mr->hw_mr.pd = pd->pd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) mr->hw_mr.local_read = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) mr->hw_mr.local_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) mr->hw_mr.remote_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) mr->hw_mr.remote_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) mr->hw_mr.remote_atomic = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) mr->hw_mr.mw_bind = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) mr->hw_mr.pbl_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) mr->hw_mr.length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) mr->hw_mr.vaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) mr->hw_mr.phy_mr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) mr->hw_mr.dma_mr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) DP_ERR(dev, "roce register tid returned an error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) mr->ibmr.rkey = mr->ibmr.lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) kfree(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
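/* ib_alloc_mr() entry point; only IB_MR_TYPE_MEM_REG is supported. */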
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) u32 max_num_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) struct qedr_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) if (mr_type != IB_MR_TYPE_MEM_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) mr = __qedr_alloc_mr(ibpd, max_num_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (IS_ERR(mr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return &mr->ibmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
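/* ib_sg_to_pages() callback: store one page address in the next free PBE
 * of the MR's page buffer list.
 */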
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct qedr_mr *mr = get_qedr_mr(ibmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) struct qedr_pbl *pbl_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) struct regpair *pbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) u32 pbes_in_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) mr->npages, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) pbe = (struct regpair *)pbl_table->va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) pbe += mr->npages % pbes_in_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) pbe->lo = cpu_to_le32((u32)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) mr->npages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
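/* Recycle PBLs whose fast-register operations have completed by moving
 * them from the in-use list back to the free list.
 */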
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) int work = info->completed - info->completed_handled - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) struct qedr_pbl *pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
		/* Free all the page lists that can be freed (all the ones
		 * that were invalidated), under the assumption that if an
		 * FMR completed successfully, then any invalidate operation
		 * issued before it has also completed.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) pbl = list_first_entry(&info->inuse_pbl_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) struct qedr_pbl, list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) list_move_tail(&pbl->list_entry, &info->free_pbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) info->completed_handled++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) int sg_nents, unsigned int *sg_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) struct qedr_mr *mr = get_qedr_mr(ibmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) mr->npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) handle_completed_mrs(mr->dev, &mr->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
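/* Allocate a DMA MR: a region with no page-buffer list (hw_mr.dma_mr is
 * set) covering the full DMA address space with the requested access
 * rights.
 */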
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) struct qedr_dev *dev = get_qedr_dev(ibpd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) struct qedr_pd *pd = get_qedr_pd(ibpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) struct qedr_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) mr = kzalloc(sizeof(*mr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (!mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) mr->type = QEDR_MR_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) /* index only, 18 bit long, lkey = itid << 8 | key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) mr->hw_mr.pd = pd->pd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) mr->hw_mr.local_read = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) mr->hw_mr.dma_mr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) DP_ERR(dev, "roce register tid returned an error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) mr->hw_mr.remote_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) return &mr->ibmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) kfree(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
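/* The work queue is full when advancing the producer would make it equal
 * to the consumer.
 */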
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) return (((wq->prod + 1) % wq->max_wr) == wq->cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) static int sge_data_len(struct ib_sge *sg_list, int num_sge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) int i, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) for (i = 0; i < num_sge; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) len += sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
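/* Byte-swap each 64-bit word of one WQE segment; the combined
 * cpu_to_be64(cpu_to_le64()) amounts to an unconditional 64-bit byte swap
 * on any host endianness.
 */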
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) static void swap_wqe_data64(u64 *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) *p = cpu_to_be64(cpu_to_le64(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
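/* Copy the payload of an inline WR directly into SQ WQE elements, setting
 * the inline flag bit in *bits when there is data to copy. Returns the
 * number of bytes copied; an oversized payload sets *bad_wr and returns 0.
 */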
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) struct qedr_qp *qp, u8 *wqe_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) const struct ib_send_wr **bad_wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) u8 *bits, u8 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) char *seg_prt, *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) int i, seg_siz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (!data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) return data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) *bits |= bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) seg_prt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) wqe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) seg_siz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) /* Copy data inline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) u32 len = wr->sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) u32 cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /* New segment required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (!seg_siz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) wqe = (char *)qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) seg_prt = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) seg_siz = sizeof(struct rdma_sq_common_wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) (*wqe_size)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) /* Calculate currently allowed length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) cur = min_t(u32, len, seg_siz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) memcpy(seg_prt, src, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) /* Update segment variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) seg_prt += cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) seg_siz -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) /* Update sge variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) src += cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) len -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) /* Swap fully-completed segments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) if (!seg_siz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) swap_wqe_data64((u64 *)wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
	/* Swap the last, partially filled segment, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (seg_siz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) swap_wqe_data64((u64 *)wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) return data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
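/* Helpers that fill RQ/SRQ SGEs and the SRQ WQE header in the
 * little-endian layout expected by the device.
 */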
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) DMA_REGPAIR_LE(sge->addr, vaddr); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) (sge)->length = cpu_to_le32(vlength); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) (sge)->flags = cpu_to_le32(vflags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) (hdr)->num_sges = num_sge; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) DMA_REGPAIR_LE(sge->addr, vaddr); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) (sge)->length = cpu_to_le32(vlength); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) (sge)->l_key = cpu_to_le32(vlkey); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
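/* Produce one SQ SGE element per scatter/gather entry of the WR and return
 * the total payload length in bytes.
 */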
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) const struct ib_send_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) u32 data_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) sge->length = cpu_to_le32(wr->sg_list[i].length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) data_size += wr->sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (wqe_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) *wqe_size += wr->num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) return data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
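/* Fill the RDMA-specific part of the WQE (rkey and remote VA) and append
 * the payload, either inline or as SGEs.
 */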
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) struct rdma_sq_rdma_wqe_1st *rwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) struct rdma_sq_rdma_wqe_2nd *rwqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) if (wr->send_flags & IB_SEND_INLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) wr->opcode == IB_WR_RDMA_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) u8 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) bad_wr, &rwqe->flags, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) struct rdma_sq_send_wqe_1st *swqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) struct rdma_sq_send_wqe_2st *swqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) memset(swqe2, 0, sizeof(*swqe2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (wr->send_flags & IB_SEND_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) u8 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) bad_wr, &swqe->flags, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
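/* Build the fast-MR WQE for an IB_WR_REG_MR request: fwqe1 carries the
 * MR's iova and key, fwqe2 the access flags, page size, length and PBL
 * address.
 */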
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) static int qedr_prepare_reg(struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) struct rdma_sq_fmr_wqe_1st *fwqe1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) const struct ib_reg_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) struct qedr_mr *mr = get_qedr_mr(wr->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) struct rdma_sq_fmr_wqe_2nd *fwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) fwqe1->l_key = wr->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) fwqe2->access_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) !!(wr->access & IB_ACCESS_REMOTE_READ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) !!(wr->access & IB_ACCESS_REMOTE_WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) !!(wr->access & IB_ACCESS_LOCAL_WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) fwqe2->fmr_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) ilog2(mr->ibmr.page_size) - 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) fwqe2->length_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) fwqe2->length_lo = mr->ibmr.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) qp->wqe_wr_id[qp->sq.prod].mr = mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
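/* Map an IB send opcode to the work completion opcode that will be
 * reported when the WQE completes.
 */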
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) return IB_WC_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) return IB_WC_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) case IB_WR_RDMA_READ_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) return IB_WC_RDMA_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) case IB_WR_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) return IB_WC_COMP_SWAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) case IB_WR_ATOMIC_FETCH_AND_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) return IB_WC_FETCH_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) return IB_WC_REG_MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) return IB_WC_LOCAL_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) return IB_WC_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) static inline bool qedr_can_post_send(struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) const struct ib_send_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) int wq_is_full, err_wr, pbl_is_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) struct qedr_dev *dev = qp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) /* prevent SQ overflow and/or processing of a bad WR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) err_wr = wr->num_sge > qp->sq.max_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) wq_is_full = qedr_wq_is_full(&qp->sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) QEDR_MAX_SQE_ELEMENTS_PER_SQE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) if (wq_is_full || err_wr || pbl_is_full) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (pbl_is_full &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
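/* Build a single SQ WQE for one work request. Called by qedr_post_send()
 * with qp->q_lock held; the doorbell is rung by the caller.
 */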
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) struct qedr_dev *dev = get_qedr_dev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) struct qedr_qp *qp = get_qedr_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) struct rdma_sq_atomic_wqe_1st *awqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) struct rdma_sq_atomic_wqe_2nd *awqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) struct rdma_sq_atomic_wqe_3rd *awqe3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) struct rdma_sq_send_wqe_2st *swqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) struct rdma_sq_local_inv_wqe *iwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) struct rdma_sq_rdma_wqe_2nd *rwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) struct rdma_sq_send_wqe_1st *swqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) struct rdma_sq_rdma_wqe_1st *rwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) struct rdma_sq_fmr_wqe_1st *fwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) struct rdma_sq_common_wqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) bool comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (!qedr_can_post_send(qp, wr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) wqe = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) qp->wqe_wr_id[qp->sq.prod].signaled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) wqe->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) !!(wr->send_flags & IB_SEND_SOLICITED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) !!(wr->send_flags & IB_SEND_FENCE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) wqe->prev_wqe_size = qp->prev_wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) swqe = (struct rdma_sq_send_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) swqe->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) swqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) swqe->length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) qp->prev_wqe_size = swqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) swqe = (struct rdma_sq_send_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) swqe->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) swqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) swqe->length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) qp->prev_wqe_size = swqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) swqe = (struct rdma_sq_send_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) swqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) swqe->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) swqe->length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) qp->prev_wqe_size = swqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) rwqe->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) rwqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) rwqe->length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) qp->prev_wqe_size = rwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) rwqe->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) rwqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) rwqe->length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) qp->prev_wqe_size = rwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) case IB_WR_RDMA_READ_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
		fallthrough;	/* handled identically to RDMA READ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) rwqe->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) rwqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) rwqe->length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) qp->prev_wqe_size = rwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) case IB_WR_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) case IB_WR_ATOMIC_FETCH_AND_ADD:
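		/* Atomic WRs use a 4-element WQE: the common header, the
		 * remote VA/rkey element, the swap/compare data element and
		 * one local SGE.
		 */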
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) awqe1->wqe_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) awqe2 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) awqe3 = qed_chain_produce(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) DMA_REGPAIR_LE(awqe3->swap_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) atomic_wr(wr)->compare_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) DMA_REGPAIR_LE(awqe3->swap_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) atomic_wr(wr)->swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) DMA_REGPAIR_LE(awqe3->cmp_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) atomic_wr(wr)->compare_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) qedr_prepare_sq_sges(qp, NULL, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) qp->prev_wqe_size = awqe1->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) iwqe->wqe_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) iwqe->inv_l_key = wr->ex.invalidate_rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) qp->prev_wqe_size = iwqe->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) fwqe1->wqe_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) qp->prev_wqe_size = fwqe1->wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (*bad_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) u16 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) /* Restore prod to its position before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) * this WR was processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) value = le16_to_cpu(qp->sq.db_data.data.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) qed_chain_set_prod(&qp->sq.pbl, value, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) /* Restore prev_wqe_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) qp->prev_wqe_size = wqe->prev_wqe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) DP_ERR(dev, "POST SEND FAILED\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
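/* Post a chain of send WRs: build all WQEs under qp->q_lock and ring the
 * SQ doorbell once at the end.
 */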
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) struct qedr_dev *dev = get_qedr_dev(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) struct qedr_qp *qp = get_qedr_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) *bad_wr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) if (qp->qp_type == IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) return qedr_gsi_post_send(ibqp, wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) spin_lock_irqsave(&qp->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) if (rdma_protocol_roce(&dev->ibdev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) (qp->state != QED_ROCE_QP_STATE_ERR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) (qp->state != QED_ROCE_QP_STATE_SQD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) spin_unlock_irqrestore(&qp->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) DP_DEBUG(dev, QEDR_MSG_CQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) "QP in wrong state! QP icid=0x%x state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) qp->icid, qp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) rc = __qedr_post_send(ibqp, wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) qedr_inc_sw_prod(&qp->sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) qp->sq.db_data.data.value++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
	/* Trigger doorbell
	 * If the first WR failed, the doorbell is rung in vain. However,
	 * this is not harmful (as long as the producer value is unchanged).
	 * For performance reasons we avoid checking for this redundant
	 * doorbell.
	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) * qp->wqe_wr_id is accessed during qedr_poll_cq, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) * soon as we give the doorbell, we could get a completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) * for this wr, therefore we need to make sure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) * memory is updated before giving the doorbell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) * During qedr_poll_cq, rmb is called before accessing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) * cqe. This covers for the smp_rmb as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) writel(qp->sq.db_data.raw, qp->sq.db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) spin_unlock_irqrestore(&qp->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) u32 used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
	/* Calculate the number of elements in use from the producer and
	 * consumer counts, and subtract it from the maximum number of
	 * supported work requests to get the number of elements left.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) return hw_srq->max_wr - used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
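/* Post receive WRs to a shared receive queue. Each WR consumes one header
 * element plus one element per SGE; the SGE and WQE producers in host
 * memory are updated after every WR.
 */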
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) struct qedr_srq *srq = get_qedr_srq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) struct qedr_srq_hwq_info *hw_srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) struct qedr_dev *dev = srq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) struct qed_chain *pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) u32 num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) spin_lock_irqsave(&srq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) hw_srq = &srq->hw_srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) pbl = &srq->hw_srq.pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) struct rdma_srq_wqe_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) if (!qedr_srq_elem_left(hw_srq) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) wr->num_sge > srq->hw_srq.max_sges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) hw_srq->wr_prod_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) atomic_read(&hw_srq->wr_cons_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) wr->num_sge, srq->hw_srq.max_sges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) hdr = qed_chain_produce(pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) num_sge = wr->num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) /* Set number of sge and work request id in header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) srq->hw_srq.wr_prod_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) hw_srq->wqe_prod++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) hw_srq->sge_prod++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) DP_DEBUG(dev, QEDR_MSG_SRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) /* Set SGE length, lkey and address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) wr->sg_list[i].length, wr->sg_list[i].lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) DP_DEBUG(dev, QEDR_MSG_SRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) "[%d]: len %d key %x addr %x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) i, srq_sge->length, srq_sge->l_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) srq_sge->addr.hi, srq_sge->addr.lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) hw_srq->sge_prod++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) /* Update WQE and SGE information before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) * updating producer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)
		/* The SRQ producer is 8 bytes wide: the SGE producer index
		 * goes in the first 4 bytes and the WQE producer in the
		 * next 4 bytes.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) /* Make sure sge producer is updated first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) qed_chain_get_elem_left(pbl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) spin_unlock_irqrestore(&srq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) struct qedr_qp *qp = get_qedr_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) struct qedr_dev *dev = qp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) if (qp->qp_type == IB_QPT_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) return qedr_gsi_post_recv(ibqp, wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) spin_lock_irqsave(&qp->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) if (qp->state == QED_ROCE_QP_STATE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) spin_unlock_irqrestore(&qp->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) wr->num_sge > qp->rq.max_sges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) qed_chain_get_elem_left_u32(&qp->rq.pbl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) qp->rq.max_sges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) struct rdma_rq_sge *rqe =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) qed_chain_produce(&qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)
			/* The first SGE must include the number
			 * of SGEs in the list.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) wr->num_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) wr->sg_list[i].lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) RQ_SGE_SET(rqe, wr->sg_list[i].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) wr->sg_list[i].length, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
		/* Special case of no SGEs. The FW requires between 1 and 4
		 * SGEs, so in this case we post one SGE with length zero.
		 * This is needed because an RDMA write with immediate
		 * consumes an RQ entry.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) if (!wr->num_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) struct rdma_rq_sge *rqe =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) qed_chain_produce(&qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
			/* The single zero-length SGE must still carry
			 * the number of SGEs in the list (one).
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) RQ_SGE_SET(rqe, 0, 0, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) qedr_inc_sw_prod(&qp->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
		/* qp->rqe_wr_id is accessed during qedr_poll_cq; as soon
		 * as we ring the doorbell we could get a completion for
		 * this WR, so we need to make sure that the memory is
		 * updated before ringing the doorbell.
		 * During qedr_poll_cq an rmb() is issued before accessing
		 * the CQE, which acts as the pairing read barrier for
		 * this smp_wmb().
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) qp->rq.db_data.data.value++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) writel(qp->rq.db_data.raw, qp->rq.db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) spin_unlock_irqrestore(&qp->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
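/* A CQE is valid when its toggle bit matches the CQ's current pbl_toggle
 * value; the toggle is flipped elsewhere whenever the CQE chain wraps, so
 * entries left over from the previous pass are not mistaken for new
 * completions.
 */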
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) struct rdma_cqe_requester *resp_cqe = &cqe->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) cq->pbl_toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
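/* Recover the owning QP from the CQE: the 64-bit qp_handle that the driver
 * stored when the QP was created is reassembled from its hi/lo halves and
 * cast back to the qedr_qp pointer.
 */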
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) struct rdma_cqe_requester *resp_cqe = &cqe->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) struct qedr_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) resp_cqe->qp_handle.lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) return qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
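/* Both the toggle bit and the CQE type are carried in the flags byte; the
 * requester view of the rdma_cqe union is used here only to reach that
 * field, regardless of the CQE's actual type.
 */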
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) struct rdma_cqe_requester *resp_cqe = &cqe->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) /* Return latest CQE (needs processing) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) static union rdma_cqe *get_cqe(struct qedr_cq *cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) return cq->latest_cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)
/* For FMR we need to increase the "completed" counter used by the FMR
 * algorithm that determines whether a PBL can be freed.
 * This must be done whether or not the work request was signaled. For that
 * reason this function is called from the condition that checks whether a
 * WR should be skipped, to make sure we don't miss it (this FMR operation
 * may not have been signaled).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
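/* Advance the SQ software consumer up to hw_cons, producing at most
 * num_entries work completions. Unsignaled WRs are skipped unless @force is
 * set (as it is when flushing), and the SQ chain elements of every WR are
 * consumed whether or not a WC was generated for it.
 */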
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) struct qedr_cq *cq, int num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) u16 cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) while (num_entries && qp->sq.wqe_cons != hw_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) qedr_chk_if_fmr(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) /* skip WC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) goto next_cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) /* fill WC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) wc->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) wc->vendor_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) wc->wc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) wc->src_qp = qp->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) wc->qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) switch (wc->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) case IB_WC_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) case IB_WC_COMP_SWAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) case IB_WC_FETCH_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) wc->byte_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) case IB_WC_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) case IB_WC_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) case IB_WC_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) num_entries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) wc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) next_cqe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) qed_chain_consume(&qp->sq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) qedr_inc_sw_cons(&qp->sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)
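/* Handle a requester CQE. On success or flush the WRs up to the reported SQ
 * consumer are completed with the matching status; on any other error the
 * preceding WRs are completed as successful, the QP is moved to ERR and, if
 * there is room left, one extra WC is emitted carrying the translated error
 * status.
 */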
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) static int qedr_poll_cq_req(struct qedr_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) struct qedr_qp *qp, struct qedr_cq *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) int num_entries, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) struct rdma_cqe_requester *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) switch (req->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) case RDMA_CQE_REQ_STS_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) IB_WC_SUCCESS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) if (qp->state != QED_ROCE_QP_STATE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) DP_DEBUG(dev, QEDR_MSG_CQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) IB_WC_WR_FLUSH_ERR, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) default:
		/* process all WQEs before the consumer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) qp->state = QED_ROCE_QP_STATE_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) cnt = process_req(dev, qp, cq, num_entries, wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) req->sq_cons - 1, IB_WC_SUCCESS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) wc += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) /* if we have extra WC fill it with actual error info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) if (cnt < num_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) enum ib_wc_status wc_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) switch (req->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) wc_status = IB_WC_BAD_RESP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) wc_status = IB_WC_LOC_LEN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) wc_status = IB_WC_LOC_QP_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) wc_status = IB_WC_LOC_PROT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) wc_status = IB_WC_MW_BIND_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) wc_status = IB_WC_REM_INV_REQ_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) wc_status = IB_WC_REM_ACCESS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) wc_status = IB_WC_REM_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) wc_status = IB_WC_RNR_RETRY_EXC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) wc_status = IB_WC_RETRY_EXC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) cq->icid, qp->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) wc_status = IB_WC_GENERAL_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) wc_status, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201)
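/* Translate a responder CQE status reported by the device into the
 * corresponding ib_wc_status value; unrecognized statuses map to
 * IB_WC_GENERAL_ERR.
 */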
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) static inline int qedr_cqe_resp_status_to_ib(u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) return IB_WC_LOC_ACCESS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) return IB_WC_LOC_LEN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) return IB_WC_LOC_QP_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) return IB_WC_LOC_PROT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) return IB_WC_MW_BIND_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) return IB_WC_REM_INV_RD_REQ_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) case RDMA_CQE_RESP_STS_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) return IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) return IB_WC_GENERAL_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223)
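/* Fill a successful responder WC from the CQE flags. Immediate data and
 * invalidate are mutually exclusive, and the RDMA flag is only meaningful
 * together with immediate data; any other combination is rejected with
 * -EINVAL so the caller can log it.
 */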
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) wc->status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) wc->byte_len = le32_to_cpu(resp->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) if (resp->flags & QEDR_RESP_IMM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) wc->wc_flags |= IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) if (resp->flags & QEDR_RESP_RDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) if (resp->flags & QEDR_RESP_INV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) } else if (resp->flags & QEDR_RESP_INV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) wc->wc_flags |= IB_WC_WITH_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) if (resp->flags & QEDR_RESP_RDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) } else if (resp->flags & QEDR_RESP_RDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) }
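/* Common responder-WC fill path shared by the RQ and SRQ flavours: set the
 * opcode and flags, translate the CQE status and copy the caller-supplied
 * wr_id into the WC.
 */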
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) struct qedr_cq *cq, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) struct rdma_cqe_responder *resp, u64 wr_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) wc->opcode = IB_WC_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) wc->wc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) if (qedr_set_ok_cqe_resp_wc(resp, wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) cq, cq->icid, resp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) wc->status = qedr_cqe_resp_status_to_ib(resp->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) if (wc->status == IB_WC_GENERAL_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) "CQ %p (icid=%d) contains an invalid CQE status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) cq, cq->icid, resp->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) /* Fill the rest of the WC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) wc->vendor_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) wc->src_qp = qp->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) wc->qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) wc->wr_id = wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282)
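/* SRQ responder completion: the wr_id travels in the CQE itself
 * (srq_wr_id), presumably because SRQ buffers are not tied to a per-QP
 * receive ring; the SRQ hardware consumer count is advanced once the WC
 * has been built.
 */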
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) struct qedr_cq *cq, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) struct rdma_cqe_responder *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) struct qedr_srq *srq = qp->srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) u64 wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) le32_to_cpu(resp->srq_wr_id.lo), u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) wc->status = IB_WC_WR_FLUSH_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) wc->vendor_err = 0;
		wc->wr_id = wr_id;
		wc->byte_len = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) __process_resp_one(dev, qp, cq, wc, resp, wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) atomic_inc(&srq->hw_srq.wr_cons_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) }
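
/* Ordinary (non-SRQ) responder completion: the wr_id comes from the per-QP
 * rqe_wr_id ring at the RQ software consumer, whose chain elements are then
 * consumed and the consumer advanced.
 */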
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) struct qedr_cq *cq, struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) struct rdma_cqe_responder *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) __process_resp_one(dev, qp, cq, wc, resp, wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) qed_chain_consume(&qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) qedr_inc_sw_cons(&qp->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322)
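/* Flush the RQ: emit IB_WC_WR_FLUSH_ERR completions for every outstanding
 * receive WR up to hw_cons, consuming the matching RQ chain elements as we
 * go.
 */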
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) int num_entries, struct ib_wc *wc, u16 hw_cons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) u16 cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) while (num_entries && qp->rq.wqe_cons != hw_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) /* fill WC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) wc->status = IB_WC_WR_FLUSH_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) wc->vendor_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) wc->wc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) wc->src_qp = qp->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) wc->byte_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) wc->qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) num_entries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) wc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) qed_chain_consume(&qp->rq.pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) qedr_inc_sw_cons(&qp->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
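/* Only consume the responder CQE once the RQ software consumer has caught
 * up with the index reported in it; a flush CQE that could not be fully
 * drained (because num_entries ran out) therefore stays visible for the
 * next poll.
 */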
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) struct rdma_cqe_responder *resp, int *update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) consume_cqe(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) *update |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) struct qedr_cq *cq, int num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) struct ib_wc *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) struct rdma_cqe_responder *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) consume_cqe(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
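/* Dispatch a responder (RQ) CQE: a flush CQE may expand into several flush
 * WCs and is only consumed once fully drained, while an ordinary CQE
 * produces exactly one WC and is consumed immediately.
 */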
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) struct qedr_cq *cq, int num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) struct ib_wc *wc, struct rdma_cqe_responder *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) int *update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) cnt = process_resp_flush(qp, cq, num_entries, wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) resp->rq_cons_or_srq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) try_consume_resp_cqe(cq, qp, resp, update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) cnt = process_resp_one(dev, qp, cq, wc, resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) consume_cqe(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) *update |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) struct rdma_cqe_requester *req, int *update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) consume_cqe(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) *update |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398)
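/* Main CQ poll loop: under cq_lock, process valid CQEs until num_entries
 * WCs have been produced or no valid CQE remains, track how far the chain
 * consumer advanced and, if any CQE was consumed, ring the CQ doorbell with
 * the last valid position.
 */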
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) struct qedr_dev *dev = get_qedr_dev(ibcq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) struct qedr_cq *cq = get_qedr_cq(ibcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) union rdma_cqe *cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) u32 old_cons, new_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) int update = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) if (cq->destroyed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) DP_ERR(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) cq, cq->icid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) if (cq->cq_type == QEDR_CQ_TYPE_GSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) return qedr_gsi_poll_cq(ibcq, num_entries, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) spin_lock_irqsave(&cq->cq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) cqe = cq->latest_cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) while (num_entries && is_valid_cqe(cq, cqe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) struct qedr_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) /* prevent speculative reads of any field of CQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) qp = cqe_get_qp(cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) if (!qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) wc->qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) switch (cqe_get_type(cqe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) case RDMA_CQE_TYPE_REQUESTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) &cqe->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) try_consume_req_cqe(cq, qp, &cqe->req, &update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) case RDMA_CQE_TYPE_RESPONDER_RQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) &cqe->resp, &update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) case RDMA_CQE_TYPE_RESPONDER_SRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) wc, &cqe->resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) update = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) case RDMA_CQE_TYPE_INVALID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) DP_ERR(dev, "Error: invalid CQE type = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) cqe_get_type(cqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) num_entries -= cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) wc += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) done += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) cqe = get_cqe(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) cq->cq_cons += new_cons - old_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) if (update)
		/* doorbell notifies about the latest VALID entry,
		 * but the chain already points to the next INVALID one
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) spin_unlock_irqrestore(&cq->cq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
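/* No MAD processing is implemented for this device; the callback is a stub
 * that always reports IB_MAD_RESULT_SUCCESS.
 */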
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) u8 port_num, const struct ib_wc *in_wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) const struct ib_grh *in_grh, const struct ib_mad *in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) struct ib_mad *out_mad, size_t *out_mad_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) u16 *out_mad_pkey_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }