/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

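/*
 * Workqueue on which send_complete() runs completion callbacks; work is
 * queued to the CPU chosen for a CQ's completion vector. Allocated in
 * rvt_driver_cq_init() and torn down in rvt_cq_exit().
 */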
static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: true on success, else false if the cq is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct ib_uverbs_wc *uqueue = NULL;
	struct ib_wc *kqueue = NULL;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	unsigned long flags;
	u32 head;
	u32 next;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	if (cq->ip) {
		u_wc = cq->queue;
		uqueue = &u_wc->uqueue[0];
		head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
	} else {
		k_wc = cq->kqueue;
		kqueue = &k_wc->kqueue[0];
		head = k_wc->head;
		tail = k_wc->tail;
	}

	/*
	 * Note that the head pointer might be writable by
	 * user processes. Take care to verify it is a sane value.
	 * The ring holds ibcq.cqe + 1 slots, so a head of ibcq.cqe
	 * wraps the next index back to 0.
	 */
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	if (unlikely(next == tail || cq->cq_full)) {
		struct rvt_dev_info *rdi = cq->rdi;

		if (!cq->cq_full)
			rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
		cq->cq_full = true;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return false;
	}
	trace_rvt_cq_enter(cq, entry, head);
	if (uqueue) {
		uqueue[head].wr_id = entry->wr_id;
		uqueue[head].status = entry->status;
		uqueue[head].opcode = entry->opcode;
		uqueue[head].vendor_err = entry->vendor_err;
		uqueue[head].byte_len = entry->byte_len;
		uqueue[head].ex.imm_data = entry->ex.imm_data;
		uqueue[head].qp_num = entry->qp->qp_num;
		uqueue[head].src_qp = entry->src_qp;
		uqueue[head].wc_flags = entry->wc_flags;
		uqueue[head].pkey_index = entry->pkey_index;
		uqueue[head].slid = ib_lid_cpu16(entry->slid);
		uqueue[head].sl = entry->sl;
		uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
	} else {
		kqueue[head] = *entry;
		k_wc->head = next;
	}

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
	return true;
}
EXPORT_SYMBOL(rvt_cq_enter);
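
/*
 * Illustrative sketch (not part of this file): a driver that has finished a
 * send might report it to rvt_cq_enter() roughly as below. The qp, wr_id and
 * len variables are hypothetical; only the rvt/ib calls are taken from this
 * file and the verbs headers.
 *
 *	struct ib_wc wc = { };
 *
 *	wc.wr_id = wr_id;
 *	wc.status = IB_WC_SUCCESS;
 *	wc.opcode = IB_WC_SEND;
 *	wc.qp = &qp->ibqp;		// rvt_cq_enter() reads entry->qp->qp_num
 *	wc.byte_len = len;
 *	if (!rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, false))
 *		;	// CQ was full; an IB_EVENT_CQ_ERR may have been raised
 */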

static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries. If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;
	int err;

	if (attr->flags)
		return -EINVAL;

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return -EINVAL;

	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (entries + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
		if (IS_ERR(cq->ip)) {
			err = PTR_ERR(cq->ip);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err)
			goto bail_ip;
	}

	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		err = -ENOMEM;
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries must be at least the number requested,
	 * otherwise an error is returned.
	 */
	cq->rdi = rdi;
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	if (u_wc)
		cq->queue = u_wc;
	else
		cq->kqueue = k_wc;

	trace_rvt_create_cq(cq, attr);
	return 0;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(u_wc);
	vfree(k_wc);
	return err;
}
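
/*
 * Illustrative sketch (not part of this file): a kernel consumer reaches
 * rvt_create_cq() through the generic verbs layer, along the lines of the
 * following. The device pointer, handlers, context and sizes are
 * hypothetical.
 *
 *	struct ib_cq_init_attr attr = {
 *		.cqe = 256,		// 1 <= cqe <= props.max_cqe
 *		.comp_vector = 0,	// reduced modulo num_comp_vectors above
 *	};
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, comp_handler, event_handler, ctx, &attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */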

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->kqueue);
	return 0;
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context. Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success, or 1 if IB_CQ_REPORT_MISSED_EVENTS was requested
 * and completions are pending on the queue.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		if (cq->queue) {
			if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
			    RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
				ret = 1;
		} else {
			if (cq->kqueue->head != cq->kqueue->tail)
				ret = 1;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
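
/*
 * Illustrative sketch (not part of this file): consumers typically re-arm
 * with IB_CQ_REPORT_MISSED_EVENTS and poll again when this returns 1, so a
 * completion queued between the last poll and the re-arm is not missed:
 *
 *	if (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;
 */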

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of entries to support
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_cq_wc *old_u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	struct rvt_k_cq_wc *old_k_wc = NULL;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large numbers of
	 * entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (cqe + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}
	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	if (u_wc) {
		old_u_wc = cq->queue;
		head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
	} else {
		old_k_wc = cq->kqueue;
		head = old_k_wc->head;
		tail = old_k_wc->tail;
	}

	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (u_wc)
			u_wc->uqueue[n] = old_u_wc->uqueue[tail];
		else
			k_wc->kqueue[n] = old_k_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	if (u_wc) {
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
		RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
		cq->queue = u_wc;
	} else {
		k_wc->head = n;
		k_wc->tail = 0;
		cq->kqueue = k_wc;
	}
	spin_unlock_irq(&cq->lock);

	if (u_wc)
		vfree(old_u_wc);
	else
		vfree(old_k_wc);

	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, u_wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(u_wc);
	vfree(k_wc);

	return ret;
}
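
/*
 * Illustrative sketch (not part of this file): a kernel consumer resizes a
 * CQ through the generic verbs call, e.g.
 *
 *	err = ib_resize_cq(ibcq, new_cqe);
 *
 * where new_cqe must be at least the number of entries currently queued,
 * otherwise -EINVAL is returned above.
 */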

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context. Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_k_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->kqueue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
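
/*
 * Illustrative sketch (not part of this file): a kernel consumer usually
 * drains the CQ from its completion handler and then re-arms it via
 * rvt_req_notify_cq() as shown earlier. The wc array size and handle_wc()
 * are hypothetical.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */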

/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}