/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10, /* 01: .01 */
	20, /* 02: .02 */
	30, /* 03: .03 */
	40, /* 04: .04 */
	60, /* 05: .06 */
	80, /* 06: .08 */
	120, /* 07: .12 */
	160, /* 08: .16 */
	240, /* 09: .24 */
	320, /* 0A: .32 */
	480, /* 0B: .48 */
	640, /* 0C: .64 */
	960, /* 0D: .96 */
	1280, /* 0E: 1.28 */
	1920, /* 0F: 1.92 */
	2560, /* 10: 2.56 */
	3840, /* 11: 3.84 */
	5120, /* 12: 5.12 */
	7680, /* 13: 7.68 */
	10240, /* 14: 10.24 */
	15360, /* 15: 15.36 */
	20480, /* 16: 20.48 */
	30720, /* 17: 30.72 */
	40960, /* 18: 40.96 */
	61440, /* 19: 61.44 */
	81920, /* 1A: 81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520 /* 1F: 491.52 */
};
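
/*
 * Illustrative lookup (a sketch, not a caller in this file): an RC
 * send path handling an RNR NAK would map the 5-bit AETH timer code
 * to a delay roughly as
 *
 *	u32 usec = ib_rvt_rnr_table[timeout_code & 0x1f];
 *
 * where timeout_code is assumed to be the value extracted from the
 * AETH credit field; the comments above give the equivalent delay in
 * milliseconds for each code.
 */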

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
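
/*
 * Example gate (a sketch of how callers typically consult this table;
 * the surrounding code is assumed, not taken from this file): a
 * post-send path would do
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * so that work requests posted in SQE/ERR are still accepted and later
 * flushed, per the C10-96 note above.
 */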

/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy. Add a __user cast
	 * to quiet sparse. The src argument is already in the kernel so
	 * there are no security issues. The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}

/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev structure
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size. LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}
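
/*
 * Worked example (illustrative, assuming a 32 MiB LLC and 4 KiB
 * pages): rvt_wss_llc_size() returns 32768 KiB, so llc_size is
 * 33554432 bytes and table_size rounds to the same power of two.
 * That yields table_bits = 8192 one-bit page slots, num_entries =
 * 8192 / 64 = 128 longs, and with the default 80% threshold the
 * working set is considered to exceed the LLC once roughly 6553
 * distinct pages have been touched within a clean period.
 */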

/*
 * Advance the clean counter. When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking. Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information. Since this is only a heuristic, this is
 * OK. Any inaccuracies will clean themselves out as the counter
 * advances. That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero. When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period. This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance. Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry. The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1) &
			(wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}
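
/*
 * Sketch of how the two helpers above combine in an adaptive SGE copy
 * (hypothetical caller; variable names are illustrative):
 *
 *	wss_insert(wss, sge_vaddr);
 *	if (wss_exceeds_threshold(wss))
 *		cacheless_memcpy(dst, src, len);	(WSS larger than LLC)
 *	else
 *		memcpy(dst, src, len);			(likely cache resident)
 *
 * The idea is to bypass the cache only once the aggregate working set
 * tracked by the bitmap reaches wss->threshold pages.
 */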

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
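
/*
 * Note: free_page() treats a zero address as a no-op, so a failed
 * get_zeroed_page() needs no special-casing above; callers detect
 * allocation failure by re-checking map->page afterwards.
 */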

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table rather than maintaining a second one. Go ahead
	 * and mark the bitmaps for those QPNs here. The reserved range must
	 * be *after* the range which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success, negative errno on failure
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value (the address of an in-use counter, cast to u64)
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
	unsigned int *qp_inuse = (unsigned int *)v;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* Reset the qp and remove it from the qp hash list */
	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

	/* Increment the qp_inuse count */
	(*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned int qp_inuse = 0;

	qp_inuse += rvt_mcast_tree_empty(rdi);

	rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
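
/*
 * Example (assuming 4 KiB pages, so RVT_BITS_PER_PAGE is 32768):
 * bit 5 of qpt->map[2] corresponds to QPN 2 * 32768 + 5 = 65541.
 */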

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @exclude_prefix: prefix of special queue pair number being allocated
 *
 * Return: The queue pair number or a negative errno on failure
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
		RVT_AIP_QPN_MAX : RVT_QPN_MAX;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= max_qpn)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
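
/*
 * Illustration of the stride logic above (descriptive, not new code):
 * with qpn_inc == 1 and qos_shift == 1, qpt->incr is 2, so the scan
 * visits every other bit while bit 0 carries an even/odd parity that
 * is inverted each time the search wraps; this is what keeps the
 * low-order QoS bits clear, as the WARN_ON verifies.
 */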

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: if true, also clear the send side
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_qp_swqe(qp, wqe);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test whether the swqe uses the lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (qp->s_rdma_mr)
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
			return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
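
/*
 * Lock ordering note (descriptive, not new behavior): the function
 * above nests r_lock, then s_hlock, then s_lock, so the lkey scan
 * runs with the receive, post-send, and send-processing paths all
 * quiesced before the QP is moved to the error state.
 */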

/**
 * rvt_remove_qp - remove qp from table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * @rdi: rvt dev struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * @qp: qp to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * Remove the QP from the table so it can't be found asynchronously by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * the receive routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (rcu_dereference_protected(rvp->qp[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) RCU_INIT_POINTER(rvp->qp[0], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) } else if (rcu_dereference_protected(rvp->qp[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) RCU_INIT_POINTER(rvp->qp[1], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct rvt_qp *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct rvt_qp __rcu **qpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) removed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) qpp = &rdi->qp_dev->qp_table[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) for (; (q = rcu_dereference_protected(*qpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) qpp = &q->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (q == qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) RCU_INIT_POINTER(*qpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) rcu_dereference_protected(qp->next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) lockdep_is_held(&rdi->qp_dev->qpt_lock)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) trace_rvt_qpremove(qp, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (removed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) rvt_put_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
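
/*
 * Illustrative sketch (not part of the driver): the RCU reader side that
 * rvt_remove_qp() synchronizes against. Receive-path code looks a QP up
 * under rcu_read_lock(); once the QP is unlinked above and
 * synchronize_rcu() has returned, no reader can still see it, so the
 * table's reference may be dropped. The function name is hypothetical.
 */
static __maybe_unused struct rvt_qp *example_reader_lookup(
	struct rvt_dev_info *rdi, struct rvt_ibport *rvp, u32 qpn)
{
	struct rvt_qp *qp;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, rvp, qpn);	/* existing rdmavt helper */
	if (qp)
		rvt_get_qp(qp);	/* take our own reference before unlocking */
	rcu_read_unlock();
	return qp;
}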
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * rvt_alloc_rq - allocate memory for user or kernel buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * @rq: receive queue data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * @size: number of request queue entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * @node: The NUMA node
 * @udata: true if user data is available, false otherwise
 *
 * This function is used by both shared receive queues and
 * non-shared receive queues to allocate memory.
 *
 * Return: 0 on success, -ENOMEM if memory allocation failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (udata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (!rq->wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* need kwq with no buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!rq->kwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) rq->kwq->curr_wq = rq->wq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* need kwq with buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) rq->kwq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) vzalloc_node(sizeof(struct rvt_krwq) + size, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (!rq->kwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) rq->kwq->curr_wq = rq->kwq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) spin_lock_init(&rq->kwq->p_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) spin_lock_init(&rq->kwq->c_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rvt_free_rq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
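
/*
 * Illustrative sketch (not part of the driver): sizing a kernel-only
 * receive queue the way rvt_create_qp() does further down. Each RWQE
 * carries room for max_sge SGEs; the entry counts and the function name
 * here are made up.
 */
static __maybe_unused int example_size_kernel_rq(struct rvt_rq *rq, int node)
{
	u32 entries = 16;	/* hypothetical: max_recv_wr + 1 */
	u32 max_sge = 2;	/* hypothetical */
	size_t sz = sizeof(struct rvt_rwqe) + sizeof(struct ib_sge) * max_sge;

	rq->size = entries;
	rq->max_sge = max_sge;
	/* udata == NULL selects the kernel (vzalloc_node) branch above */
	return rvt_alloc_rq(rq, entries * sz, node, NULL);
}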
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp(). The difference is that the reset path
 * holds the necessary locks to protect against concurrent
 * access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) enum ib_qp_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) qp->remote_qpn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) qp->qkey = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) qp->qp_access_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) qp->s_hdrwords = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) qp->s_wqe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) qp->s_draining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) qp->s_next_psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) qp->s_last_psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) qp->s_sending_psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) qp->s_sending_hpsn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) qp->s_psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) qp->r_psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) qp->r_msn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) qp->s_state = IB_OPCODE_RC_SEND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) qp->r_state = IB_OPCODE_RC_SEND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) qp->s_state = IB_OPCODE_UC_SEND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) qp->r_state = IB_OPCODE_UC_SEND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) qp->r_nak_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) qp->r_aflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) qp->r_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) qp->s_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) qp->s_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) qp->s_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) qp->s_last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) qp->s_ssn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) qp->s_lsn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) qp->s_mig_state = IB_MIG_MIGRATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) qp->r_head_ack_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) qp->s_tail_ack_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) qp->s_acked_ack_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) qp->s_num_rd_atomic = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) qp->r_sge.num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) atomic_set(&qp->s_reserved_used, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
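
/*
 * Illustrative sketch (not part of the driver): a few of the invariants
 * that hold after rvt_init_qp() has run, the sort of check a test
 * harness might make. The function name is hypothetical.
 */
static __maybe_unused bool example_qp_looks_reset(const struct rvt_qp *qp)
{
	/* the send ring is empty and no receive SGEs are outstanding */
	return qp->s_head == qp->s_tail &&
	       qp->s_last == qp->s_tail &&
	       qp->r_sge.num_sge == 0 &&
	       qp->s_mig_state == IB_MIG_MIGRATED;
}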
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * _rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * r_lock, s_hlock, and s_lock are required to be held by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) enum ib_qp_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) __must_hold(&qp->s_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) __must_hold(&qp->s_hlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) __must_hold(&qp->r_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) lockdep_assert_held(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) lockdep_assert_held(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (qp->state != IB_QPS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) qp->state = IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Let drivers flush their waitlist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) rdi->driver_f.flush_qp_waiters(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) spin_unlock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_unlock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) spin_unlock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* Stop the send queue and the retry timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rdi->driver_f.stop_send_queue(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rvt_del_timers_sync(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* Wait for things to stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) rdi->driver_f.quiesce_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
		/* take the qp out of the hash and wait for it to be unused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) rvt_remove_qp(rdi, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
		/* re-acquire the locks because they were held at call time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) spin_lock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_lock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) spin_lock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) rvt_clear_mr_refs(qp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * Let the driver do any tear down or re-init it needs to for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * a qp that has been reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) rdi->driver_f.notify_qp_reset(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rvt_init_qp(rdi, qp, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) lockdep_assert_held(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) lockdep_assert_held(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * rvt_reset_qp - initialize the QP state to the reset state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * @rdi: the device info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * @qp: the QP to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * @type: the QP type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * before calling _rvt_reset_qp().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) enum ib_qp_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_lock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) spin_lock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) spin_lock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) _rvt_reset_qp(rdi, qp, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) spin_unlock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) spin_unlock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) spin_unlock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
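
/*
 * Illustrative sketch (not part of the driver): the lock ordering the
 * reset path relies on. Any caller that needs the full QP lock set must
 * acquire in this order and release in reverse, matching rvt_reset_qp()
 * above and rvt_modify_qp() below. The function name is hypothetical.
 */
static __maybe_unused void example_qp_lock_all(struct rvt_qp *qp)
{
	spin_lock_irq(&qp->r_lock);	/* 1: receive side, disables IRQs */
	spin_lock(&qp->s_hlock);	/* 2: post-send serialization */
	spin_lock(&qp->s_lock);		/* 3: send engine state */

	/* ... critical section ... */

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
}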
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
/**
 * rvt_free_qpn - Free a qpn from the bit map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * @qpt: QP table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * @qpn: queue pair number to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct rvt_qpn_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) qpn &= RVT_AIP_QP_SUFFIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (map->page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
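
/*
 * Illustrative sketch (not part of the driver): how a QPN maps onto the
 * bitmap pages that rvt_free_qpn() clears. The function name is
 * hypothetical.
 */
static __maybe_unused void example_qpn_to_map_slot(u32 qpn, u32 *page,
						   u32 *bit)
{
	/* AIP QPNs carry a prefix that is stripped before indexing */
	if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
		qpn &= RVT_AIP_QP_SUFFIX;

	*page = (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	*bit = qpn & RVT_BITS_PER_PAGE_MASK;
}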
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /**
 * get_allowed_ops - Given a QP type, return the appropriate allowed OP
 * @type: a valid, supported QP type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static u8 get_allowed_ops(enum ib_qp_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) IB_OPCODE_UC : IB_OPCODE_UD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * @qp: Valid QP with allowed_ops set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * The rvt_swqe data structure being used is a union, so this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * only valid for UD QPs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static void free_ud_wq_attr(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) wqe = rvt_get_swqe_ptr(qp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) kfree(wqe->ud_wr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) wqe->ud_wr.attr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /**
 * alloc_ud_wq_attr - Allocate the AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: NUMA node for allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * The rvt_swqe data structure being used is a union, so this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * only valid for UD QPs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) wqe = rvt_get_swqe_ptr(qp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) GFP_KERNEL, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (!wqe->ud_wr.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) free_ud_wq_attr(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
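
/*
 * Illustrative sketch (not part of the driver): the alloc/free pairing
 * for the UD attribute cache. On failure alloc_ud_wq_attr() has already
 * freed whatever it allocated, so the caller only unwinds other state.
 * The function name is hypothetical.
 */
static __maybe_unused int example_ud_attr_roundtrip(struct rvt_qp *qp,
						    int node)
{
	int ret;

	ret = alloc_ud_wq_attr(qp, node);	/* no-op unless UD */
	if (ret)
		return ret;
	free_ud_wq_attr(qp);
	return 0;
}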
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * @init_attr: the attributes of the queue pair
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * @udata: user data for libibverbs.so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance, there is a reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * range for PSM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Return: the queue pair on success, otherwise returns an errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Called by the ib_create_qp() core verbs function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct ib_qp_init_attr *init_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct rvt_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct rvt_swqe *swq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) size_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) size_t sg_list_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct ib_qp *ret = ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) void *priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) size_t sqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) u8 exclude_prefix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (!rdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) (init_attr->create_flags &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Check receive queue parameters if no SRQ is specified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (!init_attr->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (init_attr->cap.max_recv_sge >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) rdi->dparms.props.max_recv_sge ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (init_attr->cap.max_send_sge +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) init_attr->cap.max_send_wr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) init_attr->cap.max_recv_sge +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) init_attr->cap.max_recv_wr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) sqsize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) init_attr->cap.max_send_wr + 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) rdi->dparms.reserved_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) switch (init_attr->qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) case IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) case IB_QPT_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (init_attr->port_num == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) init_attr->port_num > ibpd->device->phys_port_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) case IB_QPT_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!swq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) sz = sizeof(*qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) sg_list_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (init_attr->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (srq->rq.max_sge > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) sg_list_sz = sizeof(*qp->r_sg_list) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) (srq->rq.max_sge - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) } else if (init_attr->cap.max_recv_sge > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) sg_list_sz = sizeof(*qp->r_sg_list) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) (init_attr->cap.max_recv_sge - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) rdi->dparms.node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) goto bail_swq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) RCU_INIT_POINTER(qp->next, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (init_attr->qp_type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) qp->s_ack_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) kcalloc_node(rvt_max_atomic(rdi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) sizeof(*qp->s_ack_queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) rdi->dparms.node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (!qp->s_ack_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) goto bail_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* initialize timers needed for rc qp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) qp->s_rnr_timer.function = rvt_rc_rnr_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
		 * Driver needs to set up its private QP structure and do any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * initialization that is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (IS_ERR(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ret = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) goto bail_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) qp->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) qp->timeout_jiffies =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 1000UL);
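		/*
		 * Example (illustrative): IB encodes the local ACK timeout
		 * as 4.096 usec * 2^timeout, i.e. 4096 * 2^timeout nsec;
		 * qp->timeout == 14 would give roughly 67 msec.
		 */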
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (init_attr->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) sizeof(struct rvt_rwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) rdi->dparms.node, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto bail_driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * ib_create_qp() will initialize qp->ibqp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * except for qp->ibqp.qp_num.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) spin_lock_init(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) spin_lock_init(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) spin_lock_init(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) atomic_set(&qp->refcount, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) atomic_set(&qp->local_ops_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) init_waitqueue_head(&qp->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) INIT_LIST_HEAD(&qp->rspwait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) qp->state = IB_QPS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) qp->s_wq = swq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) qp->s_size = sqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) qp->s_avail = init_attr->cap.max_send_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) qp->s_max_sge = init_attr->cap.max_send_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) qp->s_flags = RVT_S_SIGNAL_REQ_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) err = alloc_ud_wq_attr(qp, rdi->dparms.node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (err) {
			ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) goto bail_rq_rvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) exclude_prefix = RVT_AIP_QP_PREFIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) init_attr->qp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) init_attr->port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) exclude_prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto bail_rq_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) qp->ibqp.qp_num = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) qp->port_num = init_attr->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) rvt_init_qp(rdi, qp, init_attr->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (rdi->driver_f.qp_priv_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) goto bail_rq_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Don't support raw QPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) init_attr->cap.max_inline_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * Return the address of the RWQ as the offset to mmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * See rvt_mmap() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (udata && udata->outlen >= sizeof(__u64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (!qp->r_rq.wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) __u64 offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) err = ib_copy_to_udata(udata, &offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) sizeof(offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) goto bail_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) qp->ip = rvt_create_mmap_info(rdi, s, udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) qp->r_rq.wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (IS_ERR(qp->ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ret = ERR_CAST(qp->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto bail_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) err = ib_copy_to_udata(udata, &qp->ip->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) sizeof(qp->ip->offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) goto bail_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) qp->pid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) spin_lock(&rdi->n_qps_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) spin_unlock(&rdi->n_qps_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ret = ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) goto bail_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) rdi->n_qps_allocated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy_jiffies
	 * value is scaled by the number of RC QPs created for the device to
	 * reduce the number of timeouts occurring when there is a large
	 * number of QPs. busy_jiffies is incremented every RC QP scaling
	 * interval. The scaling interval is selected based on extensive
	 * performance evaluation of targeted workloads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (init_attr->qp_type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) rdi->n_rc_qps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
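	/*
	 * Example (illustrative): with a scaling interval of 5, a device
	 * with 12 RC QPs gets busy_jiffies = 12 / 5 = 2, stretching every
	 * retry timeout by two jiffies while those QPs exist.
	 */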
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) spin_unlock(&rdi->n_qps_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (qp->ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) spin_lock_irq(&rdi->pending_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) spin_unlock_irq(&rdi->pending_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ret = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) bail_ip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (qp->ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) kref_put(&qp->ip->ref, rvt_release_mmap_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) bail_qpn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) bail_rq_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) free_ud_wq_attr(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) bail_rq_rvt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) rvt_free_rq(&qp->r_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bail_driver_priv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) rdi->driver_f.qp_priv_free(rdi, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) bail_qp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) kfree(qp->s_ack_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) bail_swq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) vfree(swq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
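
/*
 * Illustrative sketch (not part of the driver): the shape of the
 * attributes a kernel consumer passes to ib_create_qp(), which reaches
 * rvt_create_qp() through the device's create_qp verb. All values and
 * the function name are made up.
 */
static __maybe_unused struct ib_qp *example_make_rc_qp(struct ib_pd *pd,
						       struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.qp_type	= IB_QPT_RC,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.send_cq	= cq,
		.recv_cq	= cq,
		.cap = {
			.max_send_wr	= 64,	/* hypothetical */
			.max_recv_wr	= 64,	/* hypothetical */
			.max_send_sge	= 2,
			.max_recv_sge	= 2,
		},
	};

	return ib_create_qp(pd, &attr);
}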
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * rvt_error_qp - put a QP into the error state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * @qp: the QP to put into the error state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * @err: the receive completion error to signal if a RWQE is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) *
 * Flushes both send and receive work queues.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If the QP is already in the error state, just return.
 *
 * Return: true if last WQE event should be generated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) lockdep_assert_held(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) qp->state = IB_QPS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) del_timer(&qp->s_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) rdi->driver_f.notify_error_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Schedule the sending tasklet to drain the send work queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (READ_ONCE(qp->s_last) != qp->s_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) rdi->driver_f.schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) rvt_clear_mr_refs(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) memset(&wc, 0, sizeof(wc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) wc.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) wc.opcode = IB_WC_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) wc.wr_id = qp->r_wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) wc.status = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) wc.status = IB_WC_WR_FLUSH_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (qp->r_rq.kwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) u32 head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) u32 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct rvt_rwq *wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct rvt_krwq *kwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) spin_lock(&qp->r_rq.kwq->c_lock);
		/* qp->ip indicates whether a user receive buffer is mmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (qp->ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) wq = qp->r_rq.wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) head = RDMA_READ_UAPI_ATOMIC(wq->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) kwq = qp->r_rq.kwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) head = kwq->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) tail = kwq->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* sanity check pointers before trusting them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (head >= qp->r_rq.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (tail >= qp->r_rq.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) while (tail != head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (++tail >= qp->r_rq.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (qp->ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) kwq->tail = tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) spin_unlock(&qp->r_rq.kwq->c_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) } else if (qp->ibqp.event_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) EXPORT_SYMBOL(rvt_error_qp);
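
/*
 * Illustrative sketch (not part of the driver): the calling convention
 * for rvt_error_qp() and the last-WQE event dispatch, mirroring how the
 * driver itself consumes the return value. The function name is
 * hypothetical.
 */
static __maybe_unused void example_force_error(struct rvt_qp *qp)
{
	int lastwqe;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (lastwqe) {
		struct ib_event ev = {
			.device = qp->ibqp.device,
			.element.qp = &qp->ibqp,
			.event = IB_EVENT_QP_LAST_WQE_REACHED,
		};

		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}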
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) * Put the QP into the hash table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * The hash table holds a reference to the QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) rvt_get_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (qp->ibqp.qp_num <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) qp->next = rdi->qp_dev->qp_table[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) trace_rvt_qpinsert(qp, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
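
/*
 * Illustrative note (not part of the driver): QPNs 0 and 1 are the
 * per-port SMI/GSI QPs; they live in rvp->qp[0]/rvp->qp[1] rather than
 * the hash table, which is why rvt_remove_qp() checks those two slots
 * before walking the hash chain.
 */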
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * @attr: the new attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * @attr_mask: the mask of attributes to modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * @udata: user data for libibverbs.so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * Return: 0 on success, otherwise returns an errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int attr_mask, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) enum ib_qp_state cur_state, new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct ib_event ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) int lastwqe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) int mig = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int pmtu = 0; /* for gcc warning only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) int opa_ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) spin_lock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) spin_lock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) spin_lock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) cur_state = attr_mask & IB_QP_CUR_STATE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) attr->cur_qp_state : qp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) attr_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (rdi->driver_f.check_modify_qp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (attr_mask & IB_QP_AV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (opa_ah) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (rdma_ah_get_dlid(&attr->ah_attr) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) opa_get_mcast_base(OPA_MCAST_NR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (rdma_ah_get_dlid(&attr->ah_attr) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) be16_to_cpu(IB_MULTICAST_LID_BASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (attr_mask & IB_QP_ALT_PATH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (opa_ah) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) opa_get_mcast_base(OPA_MCAST_NR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) be16_to_cpu(IB_MULTICAST_LID_BASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (attr_mask & IB_QP_PKEY_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (attr->pkey_index >= rvt_get_npkeys(rdi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (attr_mask & IB_QP_MIN_RNR_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (attr->min_rnr_timer > 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (attr_mask & IB_QP_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (qp->ibqp.qp_type == IB_QPT_SMI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) qp->ibqp.qp_type == IB_QPT_GSI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) attr->port_num == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) attr->port_num > ibqp->device->phys_port_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (attr_mask & IB_QP_DEST_QPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (attr->dest_qp_num > RVT_QPN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (attr_mask & IB_QP_RETRY_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (attr->retry_cnt > 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (attr_mask & IB_QP_RNR_RETRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (attr->rnr_retry > 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
	/*
	 * Don't allow invalid path_mtu values. It is OK to set it greater
	 * than the active mtu (or even the max_cap, if we have tuned that
	 * to a small mtu). We'll set qp->path_mtu to the lesser of the
	 * requested attribute mtu and the active mtu, for packetizing
	 * messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (attr_mask & IB_QP_PATH_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (pmtu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (attr_mask & IB_QP_PATH_MIG_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (attr->path_mig_state == IB_MIG_REARM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (qp->s_mig_state == IB_MIG_ARMED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (new_state != IB_QPS_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (qp->s_mig_state == IB_MIG_REARM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (qp->s_mig_state == IB_MIG_ARMED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) mig = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) case IB_QPS_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (qp->state != IB_QPS_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) _rvt_reset_qp(rdi, qp, ibqp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) case IB_QPS_RTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) /* Allow event to re-trigger if QP set to RTR more than once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) qp->r_flags &= ~RVT_R_COMM_EST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) qp->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) case IB_QPS_SQD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) qp->s_draining = qp->s_last != qp->s_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) qp->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) case IB_QPS_SQE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (qp->ibqp.qp_type == IB_QPT_RC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) qp->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) case IB_QPS_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) qp->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (attr_mask & IB_QP_PKEY_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) qp->s_pkey_index = attr->pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (attr_mask & IB_QP_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) qp->port_num = attr->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (attr_mask & IB_QP_DEST_QPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) qp->remote_qpn = attr->dest_qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (attr_mask & IB_QP_SQ_PSN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) qp->s_psn = qp->s_next_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) qp->s_sending_psn = qp->s_next_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) qp->s_last_psn = qp->s_next_psn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) qp->s_sending_hpsn = qp->s_last_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (attr_mask & IB_QP_RQ_PSN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (attr_mask & IB_QP_ACCESS_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) qp->qp_access_flags = attr->qp_access_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (attr_mask & IB_QP_AV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (attr_mask & IB_QP_ALT_PATH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) qp->s_alt_pkey_index = attr->alt_pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (attr_mask & IB_QP_PATH_MIG_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) qp->s_mig_state = attr->path_mig_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (mig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) qp->remote_ah_attr = qp->alt_ah_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) qp->s_pkey_index = qp->s_alt_pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (attr_mask & IB_QP_PATH_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) qp->log_pmtu = ilog2(qp->pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (attr_mask & IB_QP_RETRY_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) qp->s_retry_cnt = attr->retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) qp->s_retry = attr->retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (attr_mask & IB_QP_RNR_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) qp->s_rnr_retry_cnt = attr->rnr_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) qp->s_rnr_retry = attr->rnr_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (attr_mask & IB_QP_MIN_RNR_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) qp->r_min_rnr_timer = attr->min_rnr_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (attr_mask & IB_QP_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) qp->timeout = attr->timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (attr_mask & IB_QP_QKEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) qp->qkey = attr->qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) qp->s_max_rd_atomic = attr->max_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (rdi->driver_f.modify_qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) spin_unlock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) spin_unlock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) spin_unlock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) rvt_insert_qp(rdi, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (lastwqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) ev.device = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) ev.element.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (mig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) ev.device = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ev.element.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) ev.event = IB_EVENT_PATH_MIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) inval:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) spin_unlock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) spin_unlock(&qp->s_hlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) spin_unlock_irq(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
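/*
 * Example (illustrative sketch, not part of rdmavt): a kernel consumer
 * drives the path-migration state machine above through ib_modify_qp().
 * The QP is assumed to already be in RTS with an alternate path loaded
 * via IB_QP_ALT_PATH; error handling is elided.
 *
 *	struct ib_qp_attr attr = { .path_mig_state = IB_MIG_REARM };
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_PATH_MIG_STATE);
 *	...
 *	// Declaring the migration done from the ARMED state swaps in
 *	// the alternate path (the "mig" handling above).
 *	attr.path_mig_state = IB_MIG_MIGRATED;
 *	ret = ib_modify_qp(qp, &attr, IB_QP_PATH_MIG_STATE);
 */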
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * rvt_destroy_qp - destroy a queue pair
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * @ibqp: the queue pair to destroy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * Note that this can be called while the QP is actively sending or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * receiving!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * Return: 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) rvt_reset_qp(rdi, qp, ibqp->qp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) wait_event(qp->wait, !atomic_read(&qp->refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /* qpn is now available for use again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) spin_lock(&rdi->n_qps_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) rdi->n_qps_allocated--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (qp->ibqp.qp_type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) rdi->n_rc_qps--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) spin_unlock(&rdi->n_qps_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (qp->ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) kref_put(&qp->ip->ref, rvt_release_mmap_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) kvfree(qp->r_rq.kwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) rdi->driver_f.qp_priv_free(rdi, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) kfree(qp->s_ack_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) rdma_destroy_ah_attr(&qp->remote_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) rdma_destroy_ah_attr(&qp->alt_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) free_ud_wq_attr(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) vfree(qp->s_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) kfree(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
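/*
 * The wait_event() above pairs with the QP reference helpers (a sketch
 * of the pattern; see rvt_get_qp()/rvt_put_qp() in rdmavt_qp.h):
 *
 *	rvt_get_qp(qp);		// atomic_inc(&qp->refcount)
 *	...			// transient use, e.g. from interrupt level
 *	rvt_put_qp(qp);		// dec; wake_up(&qp->wait) when it hits zero
 *
 * so rvt_destroy_qp() cannot free the QP while any such transient
 * holder is still active.
 */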
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * rvt_query_qp - query an ibqp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * @ibqp: IB qp to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * @attr: attr struct to fill in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * @attr_mask: attribute mask (ignored)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * @init_attr: struct to fill in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * Return: always 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) int attr_mask, struct ib_qp_init_attr *init_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) attr->qp_state = qp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) attr->cur_qp_state = attr->qp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) attr->path_mig_state = qp->s_mig_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) attr->qkey = qp->qkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) attr->dest_qp_num = qp->remote_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) attr->qp_access_flags = qp->qp_access_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) attr->cap.max_send_wr = qp->s_size - 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) rdi->dparms.reserved_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) attr->cap.max_send_sge = qp->s_max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) attr->cap.max_recv_sge = qp->r_rq.max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) attr->cap.max_inline_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) attr->ah_attr = qp->remote_ah_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) attr->alt_ah_attr = qp->alt_ah_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) attr->pkey_index = qp->s_pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) attr->alt_pkey_index = qp->s_alt_pkey_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) attr->en_sqd_async_notify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) attr->sq_draining = qp->s_draining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) attr->max_rd_atomic = qp->s_max_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) attr->min_rnr_timer = qp->r_min_rnr_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) attr->port_num = qp->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) attr->timeout = qp->timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) attr->retry_cnt = qp->s_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) attr->rnr_retry = qp->s_rnr_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) attr->alt_port_num =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) rdma_ah_get_port_num(&qp->alt_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) attr->alt_timeout = qp->alt_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) init_attr->event_handler = qp->ibqp.event_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) init_attr->qp_context = qp->ibqp.qp_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) init_attr->send_cq = qp->ibqp.send_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) init_attr->recv_cq = qp->ibqp.recv_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) init_attr->srq = qp->ibqp.srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) init_attr->cap = attr->cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) init_attr->qp_type = qp->ibqp.qp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) init_attr->port_num = qp->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
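/*
 * Example (sketch): a caller reads back QP attributes through the core
 * verb, which dispatches to rvt_query_qp() above. Error handling is
 * elided.
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	ib_query_qp(qp, &attr, 0, &init_attr);
 *	// attr.path_mtu now holds the enum ib_mtu for this QP and
 *	// attr.cap the usable queue depths (attr_mask is ignored here).
 */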
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * rvt_post_recv - post a receive on a QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * @ibqp: the QP to post the receive on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * @wr: the WR to post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * @bad_wr: the first bad WR is put here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * This may be called from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * Return: 0 on success, otherwise errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct rvt_krwq *wq = qp->r_rq.kwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) !qp->ibqp.srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* Check that state is OK to post receive. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) for (; wr; wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) struct rvt_rwqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) u32 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) next = wq->head + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (next >= qp->r_rq.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (next == READ_ONCE(wq->tail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (unlikely(qp_err_flush)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) memset(&wc, 0, sizeof(wc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) wc.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) wc.opcode = IB_WC_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) wc.wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) wc.status = IB_WC_WR_FLUSH_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) wqe->wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) wqe->num_sge = wr->num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) wqe->sg_list[i].addr = wr->sg_list[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) wqe->sg_list[i].length = wr->sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * Make sure queue entry is written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * before the head index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) smp_store_release(&wq->head, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
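/*
 * The smp_store_release(&wq->head, next) above is the producer half of
 * the usual release/acquire ring pairing: a consumer must read head with
 * an acquire so that the WQE contents it then reads are at least as new
 * as the index. A minimal consumer sketch (names assumed; the real
 * consumer is rvt_get_rwqe() below):
 *
 *	u32 head = smp_load_acquire(&wq->head);	// pairs with the release
 *
 *	while (tail != head) {
 *		wqe = rvt_get_rwqe_ptr(rq, tail);	// safe to read now
 *		consume_rwqe(wqe);			// hypothetical helper
 *		if (++tail >= rq->size)
 *			tail = 0;
 *	}
 */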
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * rvt_qp_valid_operation - validate post send wr request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * @qp: the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * @post_parms: the post send table for the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * @wr: the work request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * The routine validates the operation based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * validation table and returns the length of the operation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * which can extend beyond the ib_send_wr. Operation-dependent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) * flags in the table key atomic operation validation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * There is an exception for UD qps that validates the pd and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * overrides the length to include the additional UD specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * Returns a negative error or the length of the work request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * for building the swqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static inline int rvt_qp_valid_operation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) const struct rvt_operation_params *post_parms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) const struct ib_send_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) ibpd_to_rvtpd(qp->ibqp.pd)->user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) (wr->num_sge == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) wr->sg_list[0].length < sizeof(u64) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) wr->sg_list[0].addr & (sizeof(u64) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) !qp->s_max_rd_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) len = post_parms[wr->opcode].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /* UD specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (qp->ibqp.qp_type != IB_QPT_UC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) qp->ibqp.qp_type != IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) len = sizeof(struct ib_ud_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
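/*
 * Example (sketch of a driver-side post_parms[] table; the values are
 * assumptions for illustration, the real tables live in the drivers):
 * entries are indexed by opcode, so the checks above might see, e.g.:
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *	[IB_WR_ATOMIC_CMP_AND_SWP] = {
 *		.length = sizeof(struct ib_atomic_wr),
 *		.qpt_support = BIT(IB_QPT_RC),
 *		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
 *	},
 */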
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * rvt_qp_is_avail - determine queue capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * @qp: the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * @rdi: the rdmavt device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) * @reserved_op: true if this is a reserved operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * This assumes the s_hlock is held but the s_last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * qp variable is uncontrolled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * For non-reserved operations, the qp->s_avail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * may be changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * Return: 0 on success or -ENOMEM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static inline int rvt_qp_is_avail(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) struct rvt_dev_info *rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) bool reserved_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) u32 slast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) u32 avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) u32 reserved_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* see rvt_qp_wqe_unreserve() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (unlikely(reserved_op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /* see rvt_qp_wqe_unreserve() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) reserved_used = atomic_read(&qp->s_reserved_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (reserved_used >= rdi->dparms.reserved_operations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) /* non-reserved operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (likely(qp->s_avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /* See rvt_qp_complete_swqe() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) slast = smp_load_acquire(&qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (qp->s_head >= slast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) avail = qp->s_size - (qp->s_head - slast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) avail = slast - qp->s_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) reserved_used = atomic_read(&qp->s_reserved_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) avail = avail - 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) (rdi->dparms.reserved_operations - reserved_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /* ensure we don't assign a negative s_avail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if ((s32)avail <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) qp->s_avail = avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (WARN_ON(qp->s_avail >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) (qp->s_size - 1 - rdi->dparms.reserved_operations)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) rvt_pr_err(rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) qp->ibqp.qp_num, qp->s_size, qp->s_avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) qp->s_head, qp->s_tail, qp->s_cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) qp->s_acked, qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
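/*
 * Worked example of the arithmetic in rvt_qp_is_avail() (all values are
 * assumptions for illustration): with s_size = 16, s_head = 14,
 * s_last = 3, reserved_operations = 2 and reserved_used = 1:
 *
 *	avail = 16 - (14 - 3) = 5	// ring slots not in use
 *	avail = 5 - 1 - (2 - 1) = 3	// minus the full/empty sentinel
 *					// and the unused reserved slots
 *
 * so non-reserved posts may consume at most three more SWQEs before the
 * routine starts returning -ENOMEM.
 */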
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * rvt_post_one_wr - post one RC, UC, or UD send work request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * @qp: the QP to post on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * @wr: the work request to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) static int rvt_post_one_wr(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) bool *call_send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) u32 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) int acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) struct rvt_lkey_table *rkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) struct rvt_pd *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) u8 log_pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) size_t cplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) bool reserved_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) int local_ops_delayed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /* IB spec says that num_sge == 0 is OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (unlikely(wr->num_sge > qp->s_max_sge))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) cplen = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) * Local operations include fast register and local invalidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * Fast register needs to be processed immediately because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * registered lkey may be used by following work requests and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * lkey needs to be valid at the time those requests are posted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * Local invalidate can be processed immediately if fencing is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * not required and no previous local invalidate ops are pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * Signaled local operations that have been processed immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * need to have requests with "completion only" flags set posted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * to the send queue in order to generate completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) ret = rvt_fast_reg_mr(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) reg_wr(wr)->mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) reg_wr(wr)->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) reg_wr(wr)->access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if ((wr->send_flags & IB_SEND_FENCE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) atomic_read(&qp->local_ops_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) local_ops_delayed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ret = rvt_invalidate_rkey(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) qp, wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) reserved_op = rdi->post_parms[wr->opcode].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) RVT_OPERATION_USE_RESERVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) /* check for avail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) ret = rvt_qp_is_avail(qp, rdi, reserved_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) next = qp->s_head + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (next >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) rkt = &rdi->lkey_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) pd = ibpd_to_rvtpd(qp->ibqp.pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) wqe = rvt_get_swqe_ptr(qp, qp->s_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* cplen has length from above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) memcpy(&wqe->wr, wr, cplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) wqe->length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (wr->num_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) struct rvt_sge *last_sge = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) acc = wr->opcode >= IB_WR_RDMA_READ ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) IB_ACCESS_LOCAL_WRITE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) u32 length = wr->sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) &wr->sg_list[i], acc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) goto bail_inval_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) wqe->length += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) last_sge = &wqe->sg_list[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) j += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) wqe->wr.num_sge = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * Calculate and set SWQE PSN values prior to handing it off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * to the driver's check routine. This gives the driver the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * opportunity to adjust PSN values based on internal checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) log_pmtu = qp->log_pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (qp->allowed_ops == IB_OPCODE_UD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) log_pmtu = ah->log_pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (local_ops_delayed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) atomic_inc(&qp->local_ops_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) wqe->ssn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) wqe->psn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) wqe->lpsn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) wqe->ssn = qp->s_ssn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) wqe->psn = qp->s_next_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) wqe->lpsn = wqe->psn +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) (wqe->length ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) ((wqe->length - 1) >> log_pmtu) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
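	/*
	 * Example of the lpsn arithmetic above (assumed values): an
	 * 8192-byte request over a 4096-byte path MTU (log_pmtu = 12)
	 * spans two packets, so lpsn = psn + ((8192 - 1) >> 12) = psn + 1;
	 * a zero-length request stays at lpsn = psn.
	 */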
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /* general part of wqe valid - allow for driver checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (rdi->driver_f.setup_wqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) goto bail_inval_free_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) qp->s_next_psn = wqe->lpsn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (unlikely(reserved_op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) rvt_qp_wqe_reserve(qp, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) qp->s_avail--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) smp_wmb(); /* see request builders */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) qp->s_head = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) bail_inval_free_ref:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) if (qp->allowed_ops == IB_OPCODE_UD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) rdma_destroy_ah_attr(wqe->ud_wr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) bail_inval_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) /* release mr holds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) while (j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) struct rvt_sge *sge = &wqe->sg_list[--j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) rvt_put_mr(sge->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * rvt_post_send - post a send on a QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * @ibqp: the QP to post the send on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * @wr: the list of work requests to post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * @bad_wr: the first bad WR is put here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * This may be called from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * Return: 0 on success, otherwise errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) bool call_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) unsigned nreq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) spin_lock_irqsave(&qp->s_hlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) * Ensure the QP state is such that we can send. If not, bail out early;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) * there is no need to do this every time we post a send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) spin_unlock_irqrestore(&qp->s_hlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * If the send queue is empty and we only have a single WR, just go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * ahead and kick the send engine into gear. Otherwise we will always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * just schedule the send to happen later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) for (; wr; wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) err = rvt_post_one_wr(qp, wr, &call_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) nreq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) spin_unlock_irqrestore(&qp->s_hlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (nreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * Only call do_send if exactly one WR was posted and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * driver said it was OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (nreq == 1 && call_send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) rdi->driver_f.do_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) rdi->driver_f.schedule_send_no_lock(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
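/*
 * Example (sketch): a kernel ULP posting a single signaled SEND; with an
 * empty send queue this takes the call_send fast path above. The DMA
 * address, length and context are assumptions for illustration.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = len,
 *		.lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */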
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * rvt_post_srq_recv - post a receive on a shared receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * @ibsrq: the SRQ to post the receive on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * @wr: the list of work requests to post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) * @bad_wr: the first bad WR is put here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * This may be called from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * Return: 0 on success, otherwise errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) struct rvt_krwq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) for (; wr; wr = wr->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) struct rvt_rwqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) u32 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if ((unsigned)wr->num_sge > srq->rq.max_sge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) wq = srq->rq.kwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) next = wq->head + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (next >= srq->rq.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (next == READ_ONCE(wq->tail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) *bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) wqe->wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) wqe->num_sge = wr->num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) for (i = 0; i < wr->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) wqe->sg_list[i].addr = wr->sg_list[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) wqe->sg_list[i].length = wr->sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /* Make sure queue entry is written before the head index. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) smp_store_release(&wq->head, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * rvt uses the internal kernel struct as part of its ABI; for now, make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * the kernel struct does not change layout. FIXME: rvt should never cast the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * user struct to a kernel struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) offsetof(struct rvt_wqe_sge, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) offsetof(struct rvt_wqe_sge, length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) offsetof(struct rvt_wqe_sge, lkey));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return (struct ib_sge *)sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * Validate a RWQE and fill in the SGE state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) * Return 1 if OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) int i, j, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) struct rvt_lkey_table *rkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct rvt_pd *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct rvt_sge_state *ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) rkt = &rdi->lkey_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) ss = &qp->r_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) ss->sg_list = qp->r_sg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) qp->r_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) for (i = j = 0; i < wqe->num_sge; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (wqe->sg_list[i].length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) /* Check LKEY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) NULL, rvt_cast_sge(&wqe->sg_list[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) IB_ACCESS_LOCAL_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (unlikely(ret <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) goto bad_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) qp->r_len += wqe->sg_list[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) ss->num_sge = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) ss->total_len = qp->r_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) bad_lkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) while (j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) rvt_put_mr(sge->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ss->num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) memset(&wc, 0, sizeof(wc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) wc.wr_id = wqe->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) wc.status = IB_WC_LOC_PROT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) wc.opcode = IB_WC_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) wc.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /* Signal solicited completion event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) * get_rvt_head - get the head index of the circular buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * @rq: data structure for request queue entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * @ip: the mmap info pointer; non-NULL when the queue is user-mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * Return: head index value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) u32 head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) head = rq->kwq->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) return head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * Return -1 if there is a local error, 0 if no RWQE is available,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * otherwise return 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * Can be called from interrupt level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) struct rvt_rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) struct rvt_krwq *kwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) struct rvt_rwq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) struct rvt_srq *srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) struct rvt_rwqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) void (*handler)(struct ib_event *, void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) u32 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) u32 head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) void *ip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (qp->ibqp.srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) handler = srq->ibsrq.event_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) rq = &srq->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) ip = srq->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) srq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) rq = &qp->r_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) ip = qp->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) spin_lock_irqsave(&rq->kwq->c_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) kwq = rq->kwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if (ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) wq = rq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) tail = kwq->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /* Validate tail before using it since it is user writable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (tail >= rq->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) head = get_rvt_head(rq, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) kwq->count = rvt_get_rq_count(rq, head, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (unlikely(kwq->count == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) /* Make sure entry is read after the count is read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) wqe = rvt_get_rwqe_ptr(rq, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) * Even though we update the tail index in memory, the verbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * consumer is not supposed to post more entries until a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * completion is generated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (++tail >= rq->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) kwq->tail = tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (!wr_id_only && !init_sge(qp, wqe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) qp->r_wr_id = wqe->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) kwq->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * Validate head pointer value and compute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * the number of remaining WQEs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (kwq->count < srq->limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) kwq->count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) rvt_get_rq_count(rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) get_rvt_head(rq, ip), tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (kwq->count < srq->limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) struct ib_event ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) srq->limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) ev.device = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) ev.element.srq = qp->ibqp.srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) handler(&ev, srq->ibsrq.srq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) EXPORT_SYMBOL(rvt_get_rwqe);
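
/*
 * A sketch (with hypothetical labels, not rdmavt API) of how a driver's
 * receive path consumes the return value, mirroring the loopback code in
 * rvt_ruc_loopback() below:
 *
 *	ret = rvt_get_rwqe(qp, false);
 *	if (ret < 0)
 *		goto op_err;	(local error; complete with a bad status)
 *	if (!ret)
 *		goto rnr_nak;	(no RWQE posted; RNR NAK the requester)
 *	(ret == 1: qp->r_wr_id and qp->r_sge are now valid)
 */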
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) /**
 * rvt_comm_est - handle trap with QP established
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) void rvt_comm_est(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) qp->r_flags |= RVT_R_COMM_EST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (qp->ibqp.event_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct ib_event ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) ev.device = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) ev.element.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) ev.event = IB_EVENT_COMM_EST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) EXPORT_SYMBOL(rvt_comm_est);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) int lastwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) lastwqe = rvt_error_qp(qp, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (lastwqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct ib_event ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) ev.device = qp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) ev.element.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) EXPORT_SYMBOL(rvt_rc_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
/**
 * rvt_rnr_tbl_to_usec - convert an index into ib_rvt_rnr_table to usec
 * @index: the index (only the low IB_AETH_CREDIT_MASK bits are used)
 *
 * Return: the RNR timeout in microseconds for @index
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) unsigned long rvt_rnr_tbl_to_usec(u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) static inline unsigned long rvt_aeth_to_usec(u32 aeth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) IB_AETH_CREDIT_MASK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) }
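
/*
 * The RNR timeout code travels in the AETH credit field: shifted up by
 * IB_AETH_CREDIT_SHIFT and masked with IB_AETH_CREDIT_MASK. The table
 * maps that 5-bit code to microseconds per the IBTA encoding, in which
 * code 0 is the longest timeout (655.36 ms) rather than the shortest.
 */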
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
/**
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * Add a retry timer on the QP.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct ib_qp *ibqp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) qp->s_flags |= RVT_S_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) /* 4.096 usec. * (1 << qp->timeout) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) qp->s_timer.expires = jiffies + rdi->busy_jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) (qp->timeout_jiffies << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) add_timer(&qp->s_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) EXPORT_SYMBOL(rvt_add_retry_timer_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * rvt_add_rnr_timer - add/start an rnr timer on the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * @aeth: aeth of RNR timeout, simulated aeth for loopback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) u32 to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) qp->s_flags |= RVT_S_WAIT_RNR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) to = rvt_aeth_to_usec(aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) trace_rvt_rnrnak_add(qp, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) hrtimer_start(&qp->s_rnr_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) EXPORT_SYMBOL(rvt_add_rnr_timer);
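
/*
 * Callers on the loopback path have no received packet to take an AETH
 * from, so they synthesize one; see rvt_ruc_loopback() below, which
 * passes (qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT).
 */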
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * Stop any pending timers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) void rvt_stop_rc_timers(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) /* Remove QP from all timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) del_timer(&qp->s_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) hrtimer_try_to_cancel(&qp->s_rnr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) EXPORT_SYMBOL(rvt_stop_rc_timers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) /**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * Clear the RNR-wait state on @qp. The hrtimer itself is not cancelled
 * here; the only caller is the timer's own expiry handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) static void rvt_stop_rnr_timer(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /* Remove QP from rnr timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (qp->s_flags & RVT_S_WAIT_RNR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) qp->s_flags &= ~RVT_S_WAIT_RNR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) trace_rvt_rnrnak_stop(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) * rvt_del_timers_sync - wait for any timeout routines to exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) void rvt_del_timers_sync(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) del_timer_sync(&qp->s_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) hrtimer_cancel(&qp->s_rnr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) EXPORT_SYMBOL(rvt_del_timers_sync);
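
/*
 * Note that rvt_del_timers_sync() waits for a running handler to finish,
 * and both handlers take s_lock, so unlike rvt_stop_rc_timers() it must
 * be called without s_lock held.
 */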
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * This is called from s_timer for missing responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) static void rvt_rc_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) struct rvt_qp *qp = from_timer(qp, t, s_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) spin_lock_irqsave(&qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) spin_lock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (qp->s_flags & RVT_S_TIMER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) qp->s_flags &= ~RVT_S_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) rvp->n_rc_timeouts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) del_timer(&qp->s_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (rdi->driver_f.notify_restart_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) rdi->driver_f.notify_restart_rc(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) qp->s_last_psn + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) rdi->driver_f.schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) spin_unlock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) spin_unlock_irqrestore(&qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
/*
 * This is the s_rnr_timer (hrtimer) callback for RNR timeouts.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) rvt_stop_rnr_timer(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) trace_rvt_rnrnak_timeout(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) rdi->driver_f.schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) EXPORT_SYMBOL(rvt_rc_rnr_retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /**
 * rvt_qp_iter_init - initialize a QP iterator
 * @rdi: rvt devinfo
 * @v: u64 value passed to @cb
 * @cb: user-defined callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * This returns an iterator suitable for iterating QPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * The @cb is a user-defined callback and @v is a 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) * value passed to and relevant for processing in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * @cb. An example use case would be to alter QP processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * based on criteria not part of the rvt_qp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * Use cases that require memory allocation to succeed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) * must preallocate appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * Return: a pointer to an rvt_qp_iter or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) u64 v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) void (*cb)(struct rvt_qp *qp, u64 v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct rvt_qp_iter *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) i = kzalloc(sizeof(*i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) i->rdi = rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) /* number of special QPs (SMI/GSI) for device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) i->specials = rdi->ibdev.phys_port_cnt * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) i->v = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) i->cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) EXPORT_SYMBOL(rvt_qp_iter_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * rvt_qp_iter_next - return the next QP in iter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) * @iter: the iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) * Fine grained QP iterator suitable for use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) * with debugfs seq_file mechanisms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) * Updates iter->qp with the current QP when the return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) * value is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) *
 * Return: 0 - iter->qp is valid, 1 - no more QPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) int rvt_qp_iter_next(struct rvt_qp_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) __must_hold(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) int n = iter->n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) struct rvt_qp *pqp = iter->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) struct rvt_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) struct rvt_dev_info *rdi = iter->rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) * The approach is to consider the special qps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) * as additional table entries before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) * real hash table. Since the qp code sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * the qp->next hash link to NULL, this works just fine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * iter->specials is 2 * # ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) *
 * n = 0..iter->specials - 1 are the special qp indices
 *
 * n = iter->specials..rdi->qp_dev->qp_table_size + iter->specials - 1
 * are the potential hash bucket entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (pqp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) qp = rcu_dereference(pqp->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if (n < iter->specials) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct rvt_ibport *rvp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) int pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) pidx = n % rdi->ibdev.phys_port_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) rvp = rdi->ports[pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) qp = rcu_dereference(rvp->qp[n & 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) qp = rcu_dereference(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) rdi->qp_dev->qp_table[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) (n - iter->specials)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) pqp = qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) iter->qp = qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) iter->n = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) EXPORT_SYMBOL(rvt_qp_iter_next);
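
/*
 * A sketch of driving the iterator by hand under RCU (inspect() is a
 * hypothetical helper; debugfs seq_file users get this pattern from
 * their start/next methods instead):
 *
 *	struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);
 *
 *	if (iter) {
 *		rcu_read_lock();
 *		while (!rvt_qp_iter_next(iter))
 *			inspect(iter->qp);
 *		rcu_read_unlock();
 *		kfree(iter);
 *	}
 */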
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) * rvt_qp_iter - iterate all QPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) * @rdi: rvt devinfo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * @v: a 64-bit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) * @cb: a callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) * This provides a way for iterating all QPs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) * The @cb is a user-defined callback and @v is a 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) * value passed to and relevant for processing in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) * cb. An example use case would be to alter QP processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * based on criteria not part of the rvt_qp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) *
 * The code has an internal iterator to simplify
 * non-seq_file use cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) void rvt_qp_iter(struct rvt_dev_info *rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) u64 v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) void (*cb)(struct rvt_qp *qp, u64 v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct rvt_qp_iter i = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) .rdi = rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) .specials = rdi->ibdev.phys_port_cnt * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) .v = v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) .cb = cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) ret = rvt_qp_iter_next(&i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) rvt_get_qp(i.qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) i.cb(i.qp, i.v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) rvt_put_qp(i.qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) } while (!ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) EXPORT_SYMBOL(rvt_qp_iter);
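
/*
 * Example callback (a sketch; quiesce_qp() is hypothetical) showing how
 * @v can carry selection criteria, here a port number:
 *
 *	static void quiesce_one(struct rvt_qp *qp, u64 v)
 *	{
 *		if (qp->port_num == (u8)v)
 *			quiesce_qp(qp);
 *	}
 *
 *	rvt_qp_iter(rdi, port_num, quiesce_one);
 */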
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) * This should be called with s_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) enum ib_wc_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) u32 old_last, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) struct rvt_dev_info *rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) old_last = qp->s_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) trace_rvt_qp_send_completion(qp, wqe, old_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (qp->s_acked == old_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) qp->s_acked = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (qp->s_cur == old_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) qp->s_cur = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (qp->s_tail == old_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) qp->s_tail = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (qp->state == IB_QPS_SQD && last == qp->s_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) qp->s_draining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) EXPORT_SYMBOL(rvt_send_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) * rvt_copy_sge - copy data to SGE memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * @qp: associated QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) * @ss: the SGE state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * @data: the data to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * @length: the length of the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * @release: boolean to release MR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) * @copy_last: do a separate copy of the last 8 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) void *data, u32 length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) bool release, bool copy_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) struct rvt_sge *sge = &ss->sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) bool in_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) bool cacheless_copy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) struct rvt_wss *wss = rdi->wss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) cacheless_copy = length >= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) if (length >= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) * NOTE: this *assumes*:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) * o The first vaddr is the dest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * o If multiple pages, then vaddr is sequential.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) wss_insert(wss, sge->vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (length >= (2 * PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) wss_insert(wss, (sge->vaddr + PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) cacheless_copy = wss_exceeds_threshold(wss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) wss_advance_clean_counter(wss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (copy_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (length > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) length -= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) copy_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) in_last = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) while (length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) u32 len = rvt_get_sge_length(sge, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) WARN_ON_ONCE(len == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) if (unlikely(in_last)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) /* enforce byte transfer ordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) } else if (cacheless_copy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) cacheless_memcpy(sge->vaddr, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) memcpy(sge->vaddr, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) rvt_update_sge(ss, len, release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) data += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) length -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (copy_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) copy_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) in_last = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) length = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) EXPORT_SYMBOL(rvt_copy_sge);
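
/*
 * The copy_last split above lands the final 8 bytes strictly after, and
 * in byte order behind, the rest of the payload, so a consumer polling
 * the tail of a receive buffer never observes the last quadword before
 * the preceding data is visible.
 */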
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) struct rvt_qp *sqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) rvp->n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) * For RC, the requester would timeout and retry so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * shortcut the timeouts and just signal too many retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return sqp->ibqp.qp_type == IB_QPT_RC ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) /**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * have to protect against post_send(). We don't have to worry about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * receive interrupts since this is a connected protocol and all packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) * will pass through here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) void rvt_ruc_loopback(struct rvt_qp *sqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct rvt_ibport *rvp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) struct rvt_qp *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) struct rvt_sge *sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) u64 sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) atomic64_t *maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) enum ib_wc_status send_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) bool release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) bool copy_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) int local_ops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) rvp = rdi->ports[sqp->port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) * Note that we check the responder QP state after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) * checking the requester's state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) sqp->remote_qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) spin_lock_irqsave(&sqp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) /* Return if we are already busy processing a work request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) sqp->s_flags |= RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (sqp->s_last == READ_ONCE(sqp->s_head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) goto clr_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) /* Return if it is not OK to start a new work request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) goto clr_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* We are in the error state, flush the work request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) send_status = IB_WC_WR_FLUSH_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) goto flush_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) * We can rely on the entry not changing without the s_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) * being held until we update s_last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) * We increment s_cur to indicate s_last is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) if (sqp->s_last == sqp->s_cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (++sqp->s_cur >= sqp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) sqp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) spin_unlock_irqrestore(&sqp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (!qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) send_status = loopback_qp_drop(rvp, sqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) goto serr_no_r_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) spin_lock_irqsave(&qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) qp->ibqp.qp_type != sqp->ibqp.qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) send_status = loopback_qp_drop(rvp, sqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) goto serr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) memset(&wc, 0, sizeof(wc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) send_status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) release = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) sqp->s_sge.sge = wqe->sg_list[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) sqp->s_sge.sg_list = wqe->sg_list + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) sqp->s_sge.num_sge = wqe->wr.num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) sqp->s_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) switch (wqe->wr.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) goto send_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) if (rvt_invalidate_rkey(sqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) wqe->wr.ex.invalidate_rkey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) send_status = IB_WC_LOC_PROT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) local_ops = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) goto send_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) ret = rvt_get_rwqe(qp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) goto op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) goto rnr_nak;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (wqe->length > qp->r_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) goto inv_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) switch (wqe->wr.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (!rvt_invalidate_rkey(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) wqe->wr.ex.invalidate_rkey)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) wc.wc_flags = IB_WC_WITH_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) wc.ex.invalidate_rkey =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) wqe->wr.ex.invalidate_rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) wc.wc_flags = IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) wc.ex.imm_data = wqe->wr.ex.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) goto inv_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) wc.wc_flags = IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) wc.ex.imm_data = wqe->wr.ex.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) ret = rvt_get_rwqe(qp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) goto op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) goto rnr_nak;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) /* skip copy_last set and qp_access_flags recheck */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) goto do_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) copy_last = rvt_is_user_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) goto inv_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) do_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) if (wqe->length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) wqe->rdma_wr.remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) wqe->rdma_wr.rkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) IB_ACCESS_REMOTE_WRITE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) goto acc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) qp->r_sge.sg_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) qp->r_sge.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) qp->r_sge.total_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) goto inv_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) wqe->rdma_wr.remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) wqe->rdma_wr.rkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) IB_ACCESS_REMOTE_READ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) goto acc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) release = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) sqp->s_sge.sg_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) sqp->s_sge.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) qp->r_sge.sge = wqe->sg_list[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) qp->r_sge.sg_list = wqe->sg_list + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) qp->r_sge.num_sge = wqe->wr.num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) qp->r_sge.total_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) case IB_WR_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) case IB_WR_ATOMIC_FETCH_AND_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) goto inv_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) goto inv_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) wqe->atomic_wr.remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) wqe->atomic_wr.rkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) IB_ACCESS_REMOTE_ATOMIC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) goto acc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) /* Perform atomic OP and save result. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) sdata = wqe->atomic_wr.compare_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) *(u64 *)sqp->s_sge.sge.vaddr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) (u64)atomic64_add_return(sdata, maddr) - sdata :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) sdata, wqe->atomic_wr.swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) rvt_put_mr(qp->r_sge.sge.mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) qp->r_sge.num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) goto send_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) send_status = IB_WC_LOC_QP_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) goto serr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) sge = &sqp->s_sge.sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) while (sqp->s_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) u32 len = rvt_get_sge_length(sge, sqp->s_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) WARN_ON_ONCE(len == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) len, release, copy_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) rvt_update_sge(&sqp->s_sge, len, !release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) sqp->s_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) rvt_put_ss(&qp->r_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) goto send_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) wc.opcode = IB_WC_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) wc.wr_id = qp->r_wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) wc.status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) wc.byte_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) wc.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) wc.src_qp = qp->remote_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) wc.port_num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /* Signal completion event if the solicited bit is set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) send_comp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) spin_unlock_irqrestore(&qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) spin_lock_irqsave(&sqp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) rvp->n_loop_pkts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) flush_send:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) rvt_send_complete(sqp, wqe, send_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) if (local_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) atomic_dec(&sqp->local_ops_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) local_ops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) rnr_nak:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) /* Handle RNR NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) if (qp->ibqp.qp_type == IB_QPT_UC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) goto send_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) rvp->n_rnr_naks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * Note: we don't need the s_lock held since the BUSY flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) * makes this single threaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) if (sqp->s_rnr_retry == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) send_status = IB_WC_RNR_RETRY_EXC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) goto serr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) }
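/* An RNR retry count of 7 means retry indefinitely; only consume the budget otherwise. */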
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (sqp->s_rnr_retry_cnt < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) sqp->s_rnr_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) spin_unlock_irqrestore(&qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) spin_lock_irqsave(&sqp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) goto clr_busy;
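/*
* Arm the requester's RNR timer with the responder's minimum RNR
* timer value, shifted into the AETH timer/credit field position.
*/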
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) IB_AETH_CREDIT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) goto clr_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
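/*
* Error exits: each label selects the requester's send completion
* status and the responder's work completion status before falling
* into the common error handling below.
*/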
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) op_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) send_status = IB_WC_REM_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) wc.status = IB_WC_LOC_QP_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
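/* Only an RC requester is told about the invalid request. */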
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) inv_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) send_status = sqp->ibqp.qp_type == IB_QPT_RC ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) IB_WC_REM_INV_REQ_ERR :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) wc.status = IB_WC_LOC_QP_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) acc_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) send_status = IB_WC_REM_ACCESS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) wc.status = IB_WC_LOC_PROT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) /* responder goes to error state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) rvt_rc_error(qp, wc.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
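/*
* Requester-side fatal error: retire the WQE with send_status; an RC
* requester also moves to the error state and may need to deliver a
* last-WQE-reached event.
*/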
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) serr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) spin_unlock_irqrestore(&qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) serr_no_r_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) spin_lock_irqsave(&sqp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) rvt_send_complete(sqp, wqe, send_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (sqp->ibqp.qp_type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) sqp->s_flags &= ~RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) spin_unlock_irqrestore(&sqp->s_lock, flags);
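/* Notify the consumer that the last WQE on this QP has been reached. */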
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) if (lastwqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) struct ib_event ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) ev.device = sqp->ibqp.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) ev.element.qp = &sqp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) }
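/*
* Exit ladder: clear RVT_S_BUSY, release the s_lock, and leave the
* RCU read-side critical section, as appropriate for the entry point.
*/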
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) clr_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) sqp->s_flags &= ~RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) spin_unlock_irqrestore(&sqp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) EXPORT_SYMBOL(rvt_ruc_loopback);