/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * - Neither the name of Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"

/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is called once per device that registers with rdmavt.
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or per rdi
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}

/**
 * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	rvt_get_qp(qp);

bail:
	return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);

	kfree(mqp);
}

/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 * @lid: the multicast LID (host order)
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mcast_addr.mgid = *mgid;
	mcast->mcast_addr.lid = lid;

	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * rvt_mcast_find - search the global table for the given multicast GID/LID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 * @lid: the multicast LID portion of the multicast address (host order)
 *
 * NOTE: It is valid to have 1 MLID with multiple MGIDs. It is not valid
 * to have 1 MGID with multiple MLIDs.
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: NULL if not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*mgid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID/MLID must match */
			if (mcast->mcast_addr.lid == lid) {
				atomic_inc(&mcast->refcount);
				found = mcast;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
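
/*
 * Example usage (a minimal, hedged sketch; this block is compiled out and
 * the example_mcast_rx() name and the packet-delivery step are hypothetical
 * driver plumbing, not part of rdmavt): a driver's receive path typically
 * resolves an incoming packet's multicast DGID/DLID to a group with
 * rvt_mcast_find(), replicates the packet to every attached QP under RCU,
 * and then drops the reference taken by rvt_mcast_find() so a concurrent
 * rvt_detach_mcast() can make progress.
 */
#if 0	/* illustrative sketch only, not built */
static void example_mcast_rx(struct rvt_ibport *ibp, union ib_gid *dgid,
			     u16 dlid)
{
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *p;

	mcast = rvt_mcast_find(ibp, dgid, dlid);
	if (!mcast)
		return;		/* unknown group: drop the packet */

	rcu_read_lock();
	list_for_each_entry_rcu(p, &mcast->qp_list, list) {
		/* deliver a copy of the packet to p->qp (driver specific) */
	}
	rcu_read_unlock();

	/* Pair with the atomic_inc() done by rvt_mcast_find(). */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
}
#endif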

/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev struct
 * @ibp: the IB port structure
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return: zero if both were added. Return EEXIST if the GID was already in
 * the table but the QP was added. Return ESRCH if the QP was already
 * attached and neither structure was added. Return ENOMEM if a resource
 * limit (max_mcast_grp or max_mcast_qp_attach) would be exceeded and
 * neither structure was added. Return EINVAL if the MGID was found, but
 * the MLID did NOT match.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mcast_addr.mgid.raw,
			     tmcast->mcast_addr.mgid.raw,
			     sizeof(mcast->mcast_addr.mgid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
			ret = EINVAL;
			goto bail;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}

/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate the data structures since it's better to do this outside
	 * of spin locks and they will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid, lid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST: /* The mcast wasn't used */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups or attached QPs. */
		ret = -ENOMEM;
		goto bail_mqp;
	case EINVAL:
		/* Invalid MGID/MLID pair */
		ret = -EINVAL;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}

/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	if (ibqp->qp_num <= 1)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID/MLID must match */
			if (mcast->mcast_addr.lid != lid) {
				spin_unlock_irq(&ibp->lock);
				return -EINVAL;
			}
			break;
		}
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);
	/* QP not attached */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}
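
/*
 * Example usage (a minimal, hedged sketch; this block is compiled out and
 * the example_join_and_leave() name is hypothetical): kernel consumers do
 * not call rvt_attach_mcast()/rvt_detach_mcast() directly.  They go through
 * the core verbs ib_attach_mcast() and ib_detach_mcast(), which dispatch to
 * these handlers for rdmavt based devices.  The UD QP and the MGID/MLID
 * pair are assumed to have been obtained elsewhere (e.g. from a subnet
 * administrator multicast join).
 */
#if 0	/* illustrative sketch only, not built */
static int example_join_and_leave(struct ib_qp *ud_qp, union ib_gid *mgid,
				  u16 mlid)
{
	int ret;

	/* Ends up in rvt_attach_mcast() for an rdmavt backed device. */
	ret = ib_attach_mcast(ud_qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic on ud_qp ... */

	/* Ends up in rvt_detach_mcast(). */
	return ib_detach_mcast(ud_qp, mgid, mlid);
}
#endif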

/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: number of ports that still have at least one multicast group
 * attached; 0 means no QPs are attached to any multicast group.
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}
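
/*
 * Example usage (a minimal, hedged sketch; this block is compiled out and
 * the example_check_on_unregister() name is hypothetical): despite its
 * name, rvt_mcast_tree_empty() returns an in-use count, so a non-zero
 * value means the trees are NOT empty.  Device teardown code can use it
 * to flag QPs that were never detached before unregistering.
 */
#if 0	/* illustrative sketch only, not built */
static void example_check_on_unregister(struct rvt_dev_info *rdi)
{
	if (rvt_mcast_tree_empty(rdi))
		pr_warn("rdmavt: multicast groups still in use at unregister\n");
}
#endif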