/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

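/*
 * Return the CQE at index n if software owns it, or NULL if hardware
 * has not written it yet.  When 64-byte CQEs are in use only the second
 * 32 bytes of each entry carry the CQE (mlx4_ib_poll_one() advances the
 * pointer the same way), so the ownership bit is tested there.  A CQE
 * belongs to software when its ownership bit matches the parity of the
 * current pass over the ring, which is what the XOR below checks.
 */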
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

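/*
 * Allocate a kernel-space CQE buffer for @nent entries, create an MTT
 * for it and write the buffer's page list into that MTT so the HCA can
 * address it.  Each step is undone in reverse order on failure.
 */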
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);

	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

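/*
 * Pin and map a user-space CQE buffer: register the umem starting at
 * buf_addr, pick an MTT layout that covers it and write the page list
 * into the MTT.  On success the caller owns both the umem and the MTT.
 */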
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);

	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

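/*
 * Create a CQ.  The requested depth is incremented by one and rounded
 * up to a power of two; ibcq->cqe reports one entry less than the ring
 * actually holds.  User CQs take their CQE buffer and doorbell record
 * from the create command in udata, while kernel CQs allocate both here
 * before the CQ itself is allocated in firmware.
 */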
#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return -EINVAL;

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return -EINVAL;

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db = cq->db.db;
		cq->mcq.arm_db = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return 0;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	ib_umem_release(cq->umem);
	if (!udata)
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);
err_cq:
	return err;
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

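/*
 * Count the CQEs that hardware has already delivered but software has
 * not yet polled, by walking forward from the consumer index until a
 * hardware-owned entry is found.
 */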
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

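/*
 * Copy the CQEs still pending in the old buffer into the resize buffer,
 * stopping at the special RESIZE CQE that marks the end of the old
 * ring's contents.  The ownership bit of each copied entry is rewritten
 * to match its position in the new ring.
 */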
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

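/*
 * Resize a CQ.  A new buffer of the requested size is prepared (pinned
 * user memory for user CQs, a fresh kernel buffer otherwise), firmware
 * is switched over to it with mlx4_cq_resize(), and for kernel CQs any
 * still-pending CQEs are copied across under the CQ lock before the old
 * buffer is freed.
 */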
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	ib_umem_release(cq->resize_umem);
	cq->resize_umem = NULL;
out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&mcq->db);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}
	ib_umem_release(mcq->umem);
	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

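/*
 * Translate the hardware error syndrome of an error CQE into an IB work
 * completion status.  Local QP operation errors additionally get the
 * raw CQE dumped at debug level.
 */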
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

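/*
 * A received packet's checksum is considered good when the HCA reports
 * the L4 checksum as verified, or when the IP header was OK, the packet
 * was TCP or UDP and the checksum field equals 0xffff.
 */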
static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum)
{
	return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
		((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		 (status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
				       MLX4_CQE_STATUS_UDP)) &&
		 (checksum == cpu_to_be16(0xffff))));
}

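/*
 * For completions on proxy QP0/QP1 QPs of a multi-function (SR-IOV)
 * device, the interesting address information describes the tunnelled
 * packet and is carried in a header placed in the receive buffer rather
 * than in the CQE.  Pull the pkey index, source QP, GRH flag and source
 * LID/MAC/VLAN out of that header.
 */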
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

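/*
 * Generate simulated IB_WC_WR_FLUSH_ERR completions for the WQEs still
 * outstanding on one work queue of a QP, walking its wrid ring from
 * tail towards head.  This is the software path used when completions
 * can no longer be delivered by the hardware.
 */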
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

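/*
 * Poll a single CQE.  Returns 0 and fills in *wc when a completion was
 * consumed, or -EAGAIN when no software-owned CQE is available.  The
 * consumer index is advanced, the QP (and SRQ, if any) the CQE belongs
 * to is resolved, the matching work queue tail is updated, and the
 * opcode-specific fields of the work completion are decoded.  A RESIZE
 * CQE is handled inline by switching to the new buffer and repolling.
 */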
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct mlx4_ib_qp **cur_qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) struct ib_wc *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) struct mlx4_cqe *cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) struct mlx4_qp *mqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) struct mlx4_ib_wq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) struct mlx4_ib_srq *srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) struct mlx4_srq *msrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) int is_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) int is_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) int is_eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) u32 g_mlpath_rqpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) u16 wqe_ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) unsigned tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) repoll:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) cqe = next_cqe_sw(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (cq->buf.entry_size == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) cqe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) ++cq->mcq.cons_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * Make sure we read CQ entry contents after we've checked the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * ownership bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) MLX4_CQE_OPCODE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /* Resize CQ in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (cq->resize_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) cq->buf = cq->resize_buf->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) cq->ibcq.cqe = cq->resize_buf->cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) kfree(cq->resize_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) cq->resize_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) goto repoll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (!*cur_qp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * We do not have to take the QP table lock here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * because CQs will be locked while QPs are removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * from the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) be32_to_cpu(cqe->vlan_my_qpn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) *cur_qp = to_mibqp(mqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) wc->qp = &(*cur_qp)->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) u32 srq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) srq_num = g_mlpath_rqpn & 0xffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /* SRQ is also in the radix tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) srq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (is_send) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) wq = &(*cur_qp)->sq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (!(*cur_qp)->sq_signal_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) wqe_ctr = be16_to_cpu(cqe->wqe_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ++wq->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) } else if ((*cur_qp)->ibqp.srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) srq = to_msrq((*cur_qp)->ibqp.srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) wqe_ctr = be16_to_cpu(cqe->wqe_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) wc->wr_id = srq->wrid[wqe_ctr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) mlx4_ib_free_srq_wqe(srq, wqe_ctr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) } else if (msrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) srq = to_mibsrq(msrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) wqe_ctr = be16_to_cpu(cqe->wqe_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) wc->wr_id = srq->wrid[wqe_ctr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) mlx4_ib_free_srq_wqe(srq, wqe_ctr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) wq = &(*cur_qp)->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) tail = wq->tail & (wq->wqe_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) wc->wr_id = wq->wrid[tail];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ++wq->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (unlikely(is_error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) wc->status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (is_send) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) wc->wc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) case MLX4_OPCODE_RDMA_WRITE_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) wc->wc_flags |= IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) case MLX4_OPCODE_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) wc->opcode = IB_WC_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) case MLX4_OPCODE_SEND_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) wc->wc_flags |= IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) case MLX4_OPCODE_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) case MLX4_OPCODE_SEND_INVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) wc->opcode = IB_WC_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) case MLX4_OPCODE_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) wc->opcode = IB_WC_RDMA_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) wc->byte_len = be32_to_cpu(cqe->byte_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) case MLX4_OPCODE_ATOMIC_CS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) wc->opcode = IB_WC_COMP_SWAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) wc->byte_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) case MLX4_OPCODE_ATOMIC_FA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) wc->opcode = IB_WC_FETCH_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) wc->byte_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) case MLX4_OPCODE_MASKED_ATOMIC_CS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) wc->opcode = IB_WC_MASKED_COMP_SWAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) wc->byte_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) case MLX4_OPCODE_MASKED_ATOMIC_FA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) wc->opcode = IB_WC_MASKED_FETCH_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) wc->byte_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) case MLX4_OPCODE_LSO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) wc->opcode = IB_WC_LSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) case MLX4_OPCODE_FMR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) wc->opcode = IB_WC_REG_MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) case MLX4_OPCODE_LOCAL_INVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) wc->opcode = IB_WC_LOCAL_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) wc->byte_len = be32_to_cpu(cqe->byte_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

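		/*
		 * The remaining fields are interpreted differently depending
		 * on whether the port runs over Ethernet (RoCE) or InfiniBand,
		 * so look up the link layer first.
		 */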
		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

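		/*
		 * g_mlpath_rqpn packs several fields: bits 0-23 hold the
		 * remote (source) QPN, bits 24-30 the DLID path bits, and
		 * bit 31 the GRH-present flag.
		 */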
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
						      cqe->badfcs_enc,
						      cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
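		/*
		 * On Ethernet ports the SL lives in the top three bits of
		 * sl_vid and the low bits may carry a VLAN ID; on IB ports the
		 * SL is the top four bits and the source LID comes from rlid.
		 */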
		if (is_eth) {
			wc->slid = 0;
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

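/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * Returns the number of completions found; the CQE slots consumed here are
 * released back to the hardware by updating the consumer index.
 */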
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
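	/*
	 * If the device hit a fatal internal error the hardware CQ can no
	 * longer be read, so report software-generated (flush) completions
	 * for the QPs attached to this CQ instead of polling CQEs.
	 */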
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

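	/*
	 * Publish the new consumer index so the polled CQE slots can be
	 * reused by the hardware.
	 */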
	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

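/*
 * Request a completion notification on the CQ: IB_CQ_SOLICITED arms it for
 * solicited completions only, IB_CQ_NEXT_COMP for any next completion.
 *
 * Illustrative consumer pattern (not part of this driver; handle_wc() is a
 * placeholder): arm first, then re-poll to close the race with completions
 * that arrived before the CQ was armed:
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		handle_wc(wc, n);
 */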
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

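/*
 * Remove all CQEs belonging to the QP with the given QPN from the CQ,
 * compacting the remaining entries.  The caller must already hold the CQ
 * lock; see mlx4_ib_cq_clean() below for the locked wrapper.
 */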
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

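			/*
			 * Preserve the destination slot's ownership bit while
			 * overwriting the rest of the entry with the older CQE.
			 */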
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

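/*
 * Locked wrapper around __mlx4_ib_cq_clean() for callers that do not
 * already hold the CQ lock.
 */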
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}