Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * Copyright(c) 2015 - 2020 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * This file is provided under a dual BSD/GPLv2 license.  When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  *  - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  *    notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  *  - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  *    notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  *    the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *    distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  *  - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *    contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  *    from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <rdma/ib_mad.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <rdma/ib_user_verbs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/utsname.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/rculist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <rdma/opa_addr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <linux/nospec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include "hfi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include "common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #include "device.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #include "qp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #include "verbs_txreq.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #include "debugfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #include "vnic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #include "fault.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #include "affinity.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #include "ipoib.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) static unsigned int hfi1_lkey_table_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 		   S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) MODULE_PARM_DESC(lkey_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) static unsigned int hfi1_max_pds = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) MODULE_PARM_DESC(max_pds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		 "Maximum number of protection domains to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) static unsigned int hfi1_max_ahs = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) unsigned int hfi1_max_cqes = 0x2FFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) MODULE_PARM_DESC(max_cqes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		 "Maximum number of completion queue entries to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) unsigned int hfi1_max_cqs = 0x1FFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) unsigned int hfi1_max_qp_wrs = 0x3FFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) unsigned int hfi1_max_qps = 32768;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) unsigned int hfi1_max_sges = 0x60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) unsigned int hfi1_max_mcast_grps = 16384;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) MODULE_PARM_DESC(max_mcast_grps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		 "Maximum number of multicast groups to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) unsigned int hfi1_max_mcast_qp_attached = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 		   uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) MODULE_PARM_DESC(max_mcast_qp_attached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		 "Maximum number of attached QPs to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) unsigned int hfi1_max_srqs = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) unsigned int hfi1_max_srq_sges = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) unsigned int hfi1_max_srq_wrs = 0x1FFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) unsigned short piothreshold = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) module_param(piothreshold, ushort, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) static unsigned int sge_copy_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) module_param(sge_copy_mode, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) MODULE_PARM_DESC(sge_copy_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) static void verbs_sdma_complete(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	struct sdma_txreq *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	int status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) static int pio_wait(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		    struct send_context *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 		    struct hfi1_pkt_state *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 		    u32 flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) /* Length of buffer to create verbs txreq cache name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) #define TXREQ_NAME_LEN 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) static uint wss_threshold = 80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) module_param(wss_threshold, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) static uint wss_clean_period = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) module_param(wss_clean_period, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 *
 * Maps each supported send work-request opcode to the completion opcode
 * reported in the CQE.  The TID RDMA WRITE/READ variants complete as
 * plain RDMA_WRITE/RDMA_READ.  Opcodes not listed remain zero from
 * static initialization.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_TID_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
	[IB_WR_REG_MR] = IB_WC_REG_MR
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
/*
 * Length of header by opcode, 0 --> not supported
 *
 * Each entry is the 9B packet header length in bytes for that BTH
 * opcode: 12 (BTH, 3 DW) + 8 (LRH, 2 DW) plus any opcode-specific
 * extension header bytes (e.g. +4 presumably immediate data, +16
 * RETH, +28 AtomicETH per the IBTA spec -- TODO confirm sizes).
 * Unlisted opcodes are 0, i.e. unsupported.
 */
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST]                     = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE]                    = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST]                      = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY]                      = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE]                    = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = 12 + 8 + 4 + 8,
	[IB_OPCODE_RC_COMPARE_SWAP]                   = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD]                      = 12 + 8 + 28,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = 12 + 8 + 4,
	/* All TID RDMA opcodes carry a uniform 36-byte extension. */
	[IB_OPCODE_TID_RDMA_READ_REQ]                 = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_READ_RESP]                = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_REQ]                = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_RESP]               = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_DATA]               = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_DATA_LAST]          = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_ACK]                      = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_RESYNC]                   = 12 + 8 + 36,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST]                     = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE]                    = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST]                      = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY]                      = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY]                      = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 12
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
/*
 * Receive handler dispatch table, indexed by BTH opcode.
 *
 * All opcodes of a given transport (RC/UC/UD) share one handler;
 * TID RDMA opcodes each get a dedicated handler.  Unlisted entries
 * are NULL from static initialization, which qp_ok()/tid_qp_ok()
 * callers treat as "opcode not supported".
 */
static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST]                     = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE]                    = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE]                    = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP]                   = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = &hfi1_rc_rcv,

	/* TID RDMA has separate handlers for different opcodes.*/
	[IB_OPCODE_TID_RDMA_WRITE_REQ]       = &hfi1_rc_rcv_tid_rdma_write_req,
	[IB_OPCODE_TID_RDMA_WRITE_RESP]      = &hfi1_rc_rcv_tid_rdma_write_resp,
	[IB_OPCODE_TID_RDMA_WRITE_DATA]      = &hfi1_rc_rcv_tid_rdma_write_data,
	[IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = &hfi1_rc_rcv_tid_rdma_write_data,
	[IB_OPCODE_TID_RDMA_READ_REQ]        = &hfi1_rc_rcv_tid_rdma_read_req,
	[IB_OPCODE_TID_RDMA_READ_RESP]       = &hfi1_rc_rcv_tid_rdma_read_resp,
	[IB_OPCODE_TID_RDMA_RESYNC]          = &hfi1_rc_rcv_tid_rdma_resync,
	[IB_OPCODE_TID_RDMA_ACK]             = &hfi1_rc_rcv_tid_rdma_ack,

	/* UC */
	[IB_OPCODE_UC_SEND_FIRST]                     = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE]                    = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST]                      = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY]                      = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY]                      = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP]				      = &hfi1_cnp_rcv
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 
/* Low 5 bits of a BTH opcode select the operation within a transport. */
#define OPMASK 0x1f

/*
 * Per-transport bitmasks of opcodes, indexed by the 3-bit transport
 * major opcode (BTH opcode >> 5).  Bit N set means opcode N (within
 * that transport group) is in the mask.  Only single-packet ("ONLY"/
 * request/ack) opcodes are listed.
 * NOTE(review): presumably consulted on the send path to decide PIO
 * eligibility (cf. piothreshold) -- usage is not visible in this chunk;
 * confirm against the callers.
 */
static const u32 pio_opmask[BIT(3)] = {
	/* RC */
	[IB_OPCODE_RC >> 5] =
		BIT(RC_OP(SEND_ONLY) & OPMASK) |
		BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
		BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
		BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
		BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
		BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
		BIT(RC_OP(FETCH_ADD) & OPMASK),
	/* UC */
	[IB_OPCODE_UC >> 5] =
		BIT(UC_OP(SEND_ONLY) & OPMASK) |
		BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
		BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
/*
 * System image GUID.
 * NOTE(review): where this is assigned is not visible in this chunk.
 */
__be64 ib_hfi1_sys_image_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314)  * Make sure the QP is ready and able to accept the given opcode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) static inline opcode_handler qp_ok(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	     packet->qp->allowed_ops) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	    (packet->opcode == IB_OPCODE_CNP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		return opcode_handler_tbl[packet->opcode];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) #ifdef CONFIG_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		 * In order to drop non-IB traffic we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		 * set PbcInsertHrc to NONE (0x2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		 * The packet will still be delivered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		 * to the receiving node but a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		 * KHdrHCRCErr (KDETH packet with a bad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		 * HCRC) will be triggered and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		 * packet will not be delivered to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		 * correct context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 		pbc &= ~PBC_INSERT_HCRC_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 		 * In order to drop regular verbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 		 * traffic we set the PbcTestEbp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		 * flag. The packet will still be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		 * delivered to the receiving node but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		 * a 'late ebp error' will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 		 * triggered and will be dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		pbc |= PBC_TEST_EBP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	return pbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	if (packet->qp->ibqp.qp_type != IB_QPT_RC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	    !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		return opcode_handler_tbl[opcode];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	struct ib_header *hdr = packet->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	u32 tlen = packet->tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	struct hfi1_pportdata *ppd = rcd->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	struct hfi1_ibport *ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	opcode_handler opcode_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	u32 qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	int lnh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	u8 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	/* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	if (unlikely(tlen < 15 * sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	if (lnh != HFI1_LRH_BTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	packet->ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	inc_opstats(tlen, &rcd->opstats->stats[opcode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	/* verbs_qp can be picked up from any tid_rdma header struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		RVT_QPN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	if (!packet->qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		goto drop_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	spin_lock_irqsave(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	opcode_handler = tid_qp_ok(opcode, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	if (likely(opcode_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		opcode_handler(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 		goto drop_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) drop_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) drop_rcu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	struct ib_header *hdr = packet->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	u32 tlen = packet->tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	struct hfi1_pportdata *ppd = rcd->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	struct hfi1_ibport *ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	opcode_handler opcode_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	u32 qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	int lnh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	u8 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	/* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	if (unlikely(tlen < 15 * sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (lnh != HFI1_LRH_BTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	packet->ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	inc_opstats(tlen, &rcd->opstats->stats[opcode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	/* verbs_qp can be picked up from any tid_rdma header struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		RVT_QPN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	if (!packet->qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		goto drop_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	spin_lock_irqsave(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	opcode_handler = tid_qp_ok(opcode, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (likely(opcode_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		opcode_handler(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		goto drop_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) drop_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) drop_rcu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) static int hfi1_do_pkey_check(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct hfi1_pportdata *ppd = rcd->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	struct hfi1_16b_header *hdr = packet->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	u16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	/* Pkey check needed only for bypass packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	if (packet->etype != RHF_RCV_TYPE_BYPASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	/* Perform pkey check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	pkey = hfi1_16B_get_pkey(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	return ingress_pkey_check(ppd, pkey, packet->sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 				  packet->qp->s_pkey_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 				  packet->slid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) static inline void hfi1_handle_packet(struct hfi1_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 				      bool is_mcast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	u32 qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	struct hfi1_pportdata *ppd = rcd->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	opcode_handler packet_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	if (unlikely(is_mcast)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		struct rvt_mcast *mcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		struct rvt_mcast_qp *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		if (!packet->grh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		mcast = rvt_mcast_find(&ibp->rvp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 				       &packet->grh->dgid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 				       opa_get_lid(packet->dlid, 9B));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		if (!mcast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 			packet->qp = p->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			if (hfi1_do_pkey_check(packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 				goto unlock_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			spin_lock_irqsave(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			packet_handler = qp_ok(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 			if (likely(packet_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 				packet_handler(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 				ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		 * Notify rvt_multicast_detach() if it is waiting for us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		 * to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		if (atomic_dec_return(&mcast->refcount) <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 			wake_up(&mcast->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		/* Get the destination QP number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		if (packet->etype == RHF_RCV_TYPE_BYPASS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		    hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 			qp_num = ib_bth_get_qpn(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		if (!packet->qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			goto unlock_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		if (hfi1_do_pkey_check(packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			goto unlock_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		spin_lock_irqsave(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		packet_handler = qp_ok(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		if (likely(packet_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			packet_handler(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 			ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) unlock_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  * hfi1_ib_rcv - process an incoming packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  * @packet: data packet information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  * This is called to process an incoming packet at interrupt level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) void hfi1_ib_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) void hfi1_16B_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	trace_input_ibhdr(rcd->dd, packet, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594)  * This is called from a timer to check for QPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595)  * which need kernel memory in order to send a packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void mem_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct list_head *list = &dev->memwait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	struct rvt_qp *qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	struct iowait *wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	struct hfi1_qp_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	write_seqlock_irqsave(&dev->iowait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	if (!list_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		wait = list_first_entry(list, struct iowait, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		qp = iowait_to_qp(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		list_del_init(&priv->s_iowait.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		priv->s_iowait.lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		/* refcount held until actual wake up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		if (!list_empty(list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			mod_timer(&dev->mem_timer, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624)  * This is called with progress side lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) /* New API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) static void verbs_sdma_complete(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	struct sdma_txreq *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	struct verbs_txreq *tx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		container_of(cookie, struct verbs_txreq, txreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	struct rvt_qp *qp = tx->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	spin_lock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	if (tx->wqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		struct hfi1_opa_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		hdr = &tx->phdr.hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		if (unlikely(status == SDMA_TXREQ_S_ABORTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			hfi1_rc_verbs_aborted(qp, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		hfi1_rc_send_complete(qp, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	spin_unlock(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	hfi1_put_txreq(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) void hfi1_wait_kmem(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	struct ib_qp *ibqp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	struct ib_device *ibdev = ibqp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	struct hfi1_ibdev *dev = to_idev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	if (list_empty(&priv->s_iowait.list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		if (list_empty(&dev->memwait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			mod_timer(&dev->mem_timer, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		qp->s_flags |= RVT_S_WAIT_KMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		list_add_tail(&priv->s_iowait.list, &dev->memwait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		priv->s_iowait.lock = &dev->iowait_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		rvt_get_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) static int wait_kmem(struct hfi1_ibdev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		     struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		     struct hfi1_pkt_state *ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		write_seqlock(&dev->iowait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		list_add_tail(&ps->s_txreq->txreq.list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			      &ps->wait->tx_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		hfi1_wait_kmem(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		write_sequnlock(&dev->iowait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		hfi1_qp_unbusy(qp, ps->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692)  * This routine calls txadds for each sg entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694)  * Add failures will revert the sge cursor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) static noinline int build_verbs_ulp_payload(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	u32 length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	struct verbs_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	struct rvt_sge_state *ss = tx->ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct rvt_sge *sg_list = ss->sg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	struct rvt_sge sge = ss->sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	u8 num_sge = ss->num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	while (length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		len = rvt_get_sge_length(&ss->sge, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		WARN_ON_ONCE(len == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		ret = sdma_txadd_kvaddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			&tx->txreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			ss->sge.vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			goto bail_txadd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		rvt_update_sge(ss, len, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		length -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) bail_txadd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	/* unwind cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	ss->sge = sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	ss->num_sge = num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	ss->sg_list = sg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  * update_tx_opstats - record stats by opcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  * @qp; the qp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733)  * @ps: transmit packet state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734)  * @plen: the plen in dwords
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736)  * This is a routine to record the tx opstats after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  * packet has been presented to the egress mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			      u32 plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	inc_opstats(plen * 4, &s->stats[ps->opcode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	put_cpu_ptr(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  * Build the number of DMA descriptors needed to send length bytes of data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * NOTE: DMA mapping is held in the tx until completed in the ring or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  *       the tx desc is freed without having been submitted to the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * This routine ensures all the helper routine calls succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) /* New API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) static int build_verbs_tx_desc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	u32 length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct verbs_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	struct hfi1_ahg_info *ahg_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	u64 pbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	struct hfi1_sdma_header *phdr = &tx->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	u8 extra_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (tx->phdr.hdr.hdr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		 * hdrbytes accounts for PBC. Need to subtract 8 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		 * before calculating padding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			      (SIZE_OF_CRC << 2) + SIZE_OF_LT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	if (!ahg_info->ahgcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		ret = sdma_txinit_ahg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			&tx->txreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			ahg_info->tx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			hdrbytes + length +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			extra_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			ahg_info->ahgidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			verbs_sdma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			goto bail_txadd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		phdr->pbc = cpu_to_le64(pbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		ret = sdma_txadd_kvaddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			&tx->txreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			phdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			hdrbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			goto bail_txadd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		ret = sdma_txinit_ahg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			&tx->txreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			ahg_info->tx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			ahg_info->ahgidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			ahg_info->ahgcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			ahg_info->ahgdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			hdrbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			verbs_sdma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			goto bail_txadd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	/* add the ulp payload - if any. tx->ss can be NULL for acks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (tx->ss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		ret = build_verbs_ulp_payload(sde, length, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			goto bail_txadd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* add icrc, lt byte, and padding to flit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (extra_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 				       sde->dd->sdma_pad_phys, extra_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) bail_txadd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) static u64 update_hcrc(u8 opcode, u64 pbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		pbc &= ~PBC_INSERT_HCRC_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	return pbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			u64 pbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct hfi1_ahg_info *ahg_info = priv->s_ahg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	u32 hdrwords = ps->s_txreq->hdr_dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	u32 len = ps->s_txreq->s_cur_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	u32 plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	struct hfi1_ibdev *dev = ps->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct hfi1_pportdata *ppd = ps->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct verbs_txreq *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	u8 sc5 = priv->s_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	u32 dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (ps->s_txreq->phdr.hdr.hdr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			  SIZE_OF_LT) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		dwords = (len + 3) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	plen = hdrwords + dwords + sizeof(pbc) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	tx = ps->s_txreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (!sdma_txreq_built(&tx->txreq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		if (likely(pbc == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			/* No vl15 here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			if (ps->s_txreq->phdr.hdr.hdr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				pbc |= PBC_PACKET_BYPASS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				       PBC_INSERT_BYPASS_ICRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			pbc = create_pbc(ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 					 pbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 					 qp->srate_mbps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 					 vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 					 plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 				pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 				/* Update HCRC based on packet opcode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				pbc = update_hcrc(ps->opcode, pbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		tx->wqe = qp->s_wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			goto bail_build;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	ret =  sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		if (ret == -ECOMM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			goto bail_ecomm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	update_tx_opstats(qp, ps, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) bail_ecomm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	/* The current one got "sent" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) bail_build:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	ret = wait_kmem(dev, qp, ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		/* free txreq - bad state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		hfi1_put_txreq(ps->s_txreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		ps->s_txreq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * If we are now in the error state, return zero to flush the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  * send work request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) static int pio_wait(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		    struct send_context *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		    struct hfi1_pkt_state *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		    u32 flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct hfi1_devdata *dd = sc->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	 * Note that as soon as want_buffer() is called and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	 * possibly before it returns, sc_piobufavail()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 * could be called. Therefore, put QP on the I/O wait list before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 * enabling the PIO avail interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		write_seqlock(&sc->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		list_add_tail(&ps->s_txreq->txreq.list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			      &ps->wait->tx_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		if (list_empty(&priv->s_iowait.list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			struct hfi1_ibdev *dev = &dd->verbs_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			int was_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			qp->s_flags |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			was_empty = list_empty(&sc->piowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			iowait_get_priority(&priv->s_iowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			iowait_queue(ps->pkts_sent, &priv->s_iowait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				     &sc->piowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			priv->s_iowait.lock = &sc->waitlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			rvt_get_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			/* counting: only call wantpiobuf_intr if first user */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			if (was_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				hfi1_sc_wantpiobuf_intr(sc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		write_sequnlock(&sc->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		hfi1_qp_unbusy(qp, ps->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) static void verbs_pio_complete(void *arg, int code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	struct rvt_qp *qp = (struct rvt_qp *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (iowait_pio_dec(&priv->s_iowait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		iowait_drain_wakeup(&priv->s_iowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			u64 pbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	u32 hdrwords = ps->s_txreq->hdr_dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct rvt_sge_state *ss = ps->s_txreq->ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	u32 len = ps->s_txreq->s_cur_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	u32 dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	u32 plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct hfi1_pportdata *ppd = ps->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	u32 *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	u8 sc5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	struct send_context *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct pio_buf *pbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	int wc_status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	pio_release_cb cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	u8 extra_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (ps->s_txreq->phdr.hdr.hdr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		dwords = (len + extra_bytes) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		dwords = (len + 3) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	plen = hdrwords + dwords + sizeof(pbc) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* only RC/UC use complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	switch (qp->ibqp.qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		cb = verbs_pio_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	/* vl15 special case taken care of in ud.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	sc5 = priv->s_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	sc = ps->s_txreq->psc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (likely(pbc == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		if (ps->s_txreq->phdr.hdr.hdr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			/* Update HCRC based on packet opcode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			pbc = update_hcrc(ps->opcode, pbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		iowait_pio_inc(&priv->s_iowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (IS_ERR_OR_NULL(pbuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		if (cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			verbs_pio_complete(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		if (IS_ERR(pbuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			 * If we have filled the PIO buffers to capacity and are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			 * not in an active state this request is not going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			 * go out to so just complete it with an error or else a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			 * ULP or the core may be stuck waiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			hfi1_cdbg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				PIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 				"alloc failed. state not active, completing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			wc_status = IB_WC_GENERAL_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			goto pio_bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			 * This is a normal occurrence. The PIO buffs are full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			 * up but we are still happily sending, well we could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			 * so lets continue to queue the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				/* txreq not queued - free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			/* tx consumed in wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (dwords == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		seg_pio_copy_start(pbuf, pbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 				   hdr, hdrwords * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		if (ss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 				void *addr = ss->sge.vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				u32 slen = rvt_get_sge_length(&ss->sge, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				rvt_update_sge(ss, slen, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 				seg_pio_copy_mid(pbuf, addr, slen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 				len -= slen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		/* add icrc, lt byte, and padding to flit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		if (extra_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 					 extra_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		seg_pio_copy_end(pbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	update_tx_opstats(qp, ps, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			       &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) pio_bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	if (qp->s_wqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		rvt_send_complete(qp, qp->s_wqe, wc_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		if (unlikely(wc_status == IB_WC_GENERAL_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	hfi1_put_txreq(ps->s_txreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * being an entry from the partition key table), return 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * otherwise. Use the matching criteria for egress partition keys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  * specified in the OPAv1 spec., section 9.1l.7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	u16 mkey = pkey & PKEY_LOW_15_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	u16 mentry = ent & PKEY_LOW_15_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (mkey == mentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		 * If pkey[15] is set (full partition member),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		 * is bit 15 in the corresponding table element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		 * clear (limited member)?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		if (pkey & PKEY_MEMBER_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			return !!(ent & PKEY_MEMBER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * egress_pkey_check - check P_KEY of a packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * @ppd:  Physical IB port data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  * @slid: SLID for packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * @bkey: PKEY for header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * @sc5:  SC for packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  * @s_pkey_index: It will be used for look up optimization for kernel contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  * only. If it is negative value, then it means user contexts is calling this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  * It checks if hdr's pkey is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  * Return: 0 on success, otherwise, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		      u8 sc5, int8_t s_pkey_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	int is_user_ctxt_mechanism = (s_pkey_index < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	/* If SC15, pkey[0:14] must be 0x7fff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	/* Is the pkey = 0x0, or 0x8000? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if ((pkey & PKEY_LOW_15_MASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	 * For the kernel contexts only, if a qp is passed into the function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	 * the most likely matching pkey has index qp->s_pkey_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (!is_user_ctxt_mechanism &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	for (i = 0; i < MAX_PKEY_VALUES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	 * For the user-context mechanism, the P_KEY check would only happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	 * once per SDMA request, not once per packet.  Therefore, there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	 * need to increment the counter for the user-context mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (!is_user_ctxt_mechanism) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		incr_cntr64(&ppd->port_xmit_constraint_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (!(dd->err_info_xmit_constraint.status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		      OPA_EI_STATUS_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			dd->err_info_xmit_constraint.status |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 				OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			dd->err_info_xmit_constraint.slid = slid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			dd->err_info_xmit_constraint.pkey = pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  * get_send_routine - choose an egress routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * Choose an egress routine based on QP type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * and size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static inline send_routine get_send_routine(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 					    struct hfi1_pkt_state *ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct verbs_txreq *tx = ps->s_txreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return dd->process_pio_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	switch (qp->ibqp.qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	case IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return dd->process_pio_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	case IB_QPT_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	case IB_QPT_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		priv->s_running_pkt_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			(tx->s_cur_size + priv->s_running_pkt_size) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		if (piothreshold &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		    priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		    (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		    !sdma_txreq_built(&tx->txreq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			return dd->process_pio_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	return dd->process_dma_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * hfi1_verbs_send - send a packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * @qp: the QP to send on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * @ps: the state of the packet to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * Return zero if packet is sent or queued OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct ib_other_headers *ohdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	send_routine sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	u16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	u32 slid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	u8 l4 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	/* locate the pkey within the headers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (ps->s_txreq->phdr.hdr.hdr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		l4 = hfi1_16B_get_l4(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		if (l4 == OPA_16B_L4_IB_LOCAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		else if (l4 == OPA_16B_L4_IB_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			ohdr = &hdr->u.l.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		slid = hfi1_16B_get_slid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		pkey = hfi1_16B_get_pkey(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		u8 lnh = ib_get_lnh(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		if (lnh == HFI1_LRH_GRH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			ohdr = &hdr->u.l.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 			ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		slid = ib_get_slid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		pkey = ib_bth_get_pkey(ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (likely(l4 != OPA_16B_L4_FM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		ps->opcode = ib_bth_get_opcode(ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		ps->opcode = IB_OPCODE_UD_SEND_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	sr = get_send_routine(qp, ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	ret = egress_pkey_check(dd->pport, slid, pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				priv->s_sc, qp->s_pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		 * The value we are returning here does not get propagated to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		 * the verbs caller. Thus we need to complete the request with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		 * error otherwise the caller could be sitting waiting on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		 * completion event. Only do this for PIO. SDMA has its own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		 * mechanism for handling the errors. So for SDMA we can just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		 * return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		if (sr == dd->process_pio_send) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				  __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		return pio_wait(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 				ps->s_txreq->psc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				HFI1_S_WAIT_PIO_DRAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	return sr(qp, ps, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * @dd: the device data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	u32 ver = dd->dc8051_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		((u64)(dc8051_ver_min(ver)) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		(u64)dc8051_ver_patch(ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			IB_DEVICE_MEM_MGT_EXTENSIONS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			IB_DEVICE_RDMA_NETDEV_OPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	rdi->dparms.props.page_size_cap = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	rdi->dparms.props.hw_ver = dd->minrev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	rdi->dparms.props.max_mr_size = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	rdi->dparms.props.max_qp = hfi1_max_qps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	rdi->dparms.props.max_qp_wr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		(hfi1_max_qp_wrs >= HFI1_QP_WQE_INVALID ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		 HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	rdi->dparms.props.max_send_sge = hfi1_max_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	rdi->dparms.props.max_recv_sge = hfi1_max_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	rdi->dparms.props.max_cq = hfi1_max_cqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	rdi->dparms.props.max_ah = hfi1_max_ahs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	rdi->dparms.props.max_cqe = hfi1_max_cqes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	rdi->dparms.props.max_pd = hfi1_max_pds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	rdi->dparms.props.max_qp_init_rd_atom = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	rdi->dparms.props.max_srq = hfi1_max_srqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	rdi->dparms.props.max_total_mcast_qp_attach =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 					rdi->dparms.props.max_mcast_qp_attach *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 					rdi->dparms.props.max_mcast_grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static inline u16 opa_speed_to_ib(u16 in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	u16 out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (in & OPA_LINK_SPEED_25G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		out |= IB_SPEED_EDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	if (in & OPA_LINK_SPEED_12_5G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		out |= IB_SPEED_FDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	return out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)  * Convert a single OPA link width (no multiple flags) to an IB value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)  * A zero OPA link width means link down, which means the IB width value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)  * is a don't care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static inline u16 opa_width_to_ib(u16 in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	switch (in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	case OPA_LINK_WIDTH_1X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	/* map 2x and 3x to 1x as they don't exist in IB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	case OPA_LINK_WIDTH_2X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	case OPA_LINK_WIDTH_3X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		return IB_WIDTH_1X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	default: /* link down or unknown, return our largest width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	case OPA_LINK_WIDTH_4X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		return IB_WIDTH_4X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static int query_port(struct rvt_dev_info *rdi, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		      struct ib_port_attr *props)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	u32 lid = ppd->lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	/* props being zeroed by the caller, avoid zeroing it here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	props->lid = lid ? lid : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	props->lmc = ppd->lmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	/* OPA logical states match IB logical states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	props->state = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	props->phys_state = driver_pstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	/* see rate_show() in ib core/sysfs.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	props->active_speed = opa_speed_to_ib(ppd->link_speed_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	props->max_vl_num = ppd->vls_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	/* Once we are a "first class" citizen and have added the OPA MTUs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 * the core we can advertise the larger MTU enum to the ULPs, for now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	 * advertise only 4K.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	 * Those applications which are either OPA aware or pass the MTU enum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	 * from the Path Records to us will get the new 8k MTU.  Those that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	 * attempt to process the MTU enum may fail in various ways.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				      4096 : hfi1_max_mtu), IB_MTU_4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	props->phys_mtu = hfi1_max_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static int modify_device(struct ib_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			 int device_modify_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			 struct ib_device_modify *device_modify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	struct hfi1_devdata *dd = dd_from_ibdev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 				   IB_DEVICE_MODIFY_NODE_DESC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		memcpy(device->node_desc, device_modify->node_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		       IB_DEVICE_NODE_DESC_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		for (i = 0; i < dd->num_pports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 			hfi1_node_desc_chg(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		ib_hfi1_sys_image_guid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			cpu_to_be64(device_modify->sys_image_guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		for (i = 0; i < dd->num_pports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			hfi1_sys_guid_chg(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			     OPA_LINKDOWN_REASON_UNKNOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			    int guid_index, __be64 *guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (guid_index >= HFI1_GUIDS_PER_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	*guid = get_sguid(ibp, guid_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)  * convert ah port,sl to sc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	u8 sc5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	u8 sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	    !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	/* test the mapping for validity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	dd = dd_from_ppd(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	sl = rdma_ah_get_sl(ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	sc5 = ibp->sl_to_sc[sl];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void hfi1_notify_new_ah(struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			       struct rdma_ah_attr *ah_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			       struct rvt_ah *ah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	u8 sc5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	struct rdma_ah_attr *attr = &ah->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	 * Do not trust reading anything from rvt_ah at this point as it is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	 * done being setup. We can however modify things which we need to set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	hfi1_update_ah_attr(ibdev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	hfi1_make_opa_lid(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	dd = dd_from_ppd(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	ah->vl = sc_to_vlt(dd, sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (ah->vl < num_vls || ah->vl == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  * hfi1_get_npkeys - return the size of the PKEY table for context 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	return ARRAY_SIZE(dd->pport[0].pkeys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) static void init_ibport(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct hfi1_ibport *ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	for (i = 0; i < sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		ibp->sl_to_sc[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		ibp->sc_to_sl[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	spin_lock_init(&ibp->rvp.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	/* Set the prefix to the default value (see ch. 4.1.1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	ibp->rvp.sm_lid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	 * Below should only set bits defined in OPA PortInfo.CapabilityMask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	 * and PortInfo.CapabilityMask3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		IB_PORT_CAP_MASK_NOTICE_SUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	struct hfi1_ibdev *dev = dev_from_rdi(rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	u32 ver = dd_from_dev(dev)->dc8051_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		 dc8051_ver_min(ver), dc8051_ver_patch(ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static const char * const driver_cntr_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	/* must be element 0*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	"DRIVER_KernIntr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	"DRIVER_ErrorIntr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	"DRIVER_Tx_Errs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	"DRIVER_Rcv_Errs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	"DRIVER_HW_Errs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	"DRIVER_NoPIOBufs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	"DRIVER_CtxtsOpen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	"DRIVER_RcvLen_Errs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	"DRIVER_EgrBufFull",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	"DRIVER_EgrHdrFull"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static const char **dev_cntr_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static const char **port_cntr_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static int num_dev_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static int num_port_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static int cntr_names_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  * Convert a list of names separated by '\n' into an array of NULL terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  * strings. Optionally some entries can be reserved in the array to hold extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  * external strings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static int init_cntr_names(const char *names_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			   const size_t names_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			   int num_extra_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			   int *num_cntrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			   const char ***cntr_names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	char *names_out, *p, **q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	for (i = 0; i < names_len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		if (names_in[i] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 			    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	if (!names_out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		*num_cntrs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		*cntr_names = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	p = names_out + (n + num_extra_names) * sizeof(char *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	memcpy(p, names_in, names_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	q = (char **)names_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		q[i] = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		p = strchr(p, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		*p++ = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	*num_cntrs = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	*cntr_names = (const char **)names_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 					    u8 port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	mutex_lock(&cntr_names_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	if (!cntr_names_initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		err = init_cntr_names(dd->cntrnames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				      dd->cntrnameslen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				      num_driver_cntrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				      &num_dev_cntrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 				      &dev_cntr_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			mutex_unlock(&cntr_names_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		for (i = 0; i < num_driver_cntrs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			dev_cntr_names[num_dev_cntrs + i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 				driver_cntr_names[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		err = init_cntr_names(dd->portcntrnames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 				      dd->portcntrnameslen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 				      0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 				      &num_port_cntrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 				      &port_cntr_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			kfree(dev_cntr_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			dev_cntr_names = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			mutex_unlock(&cntr_names_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		cntr_names_initialized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	mutex_unlock(&cntr_names_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (!port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		return rdma_alloc_hw_stats_struct(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 				dev_cntr_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 				num_dev_cntrs + num_driver_cntrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 				RDMA_HW_STATS_DEFAULT_LIFESPAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		return rdma_alloc_hw_stats_struct(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 				port_cntr_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 				num_port_cntrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 				RDMA_HW_STATS_DEFAULT_LIFESPAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static u64 hfi1_sps_ints(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	unsigned long index, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	u64 sps_ints = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	xa_lock_irqsave(&hfi1_dev_table, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	xa_for_each(&hfi1_dev_table, index, dd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		sps_ints += get_all_cpu_total(dd->int_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	xa_unlock_irqrestore(&hfi1_dev_table, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	return sps_ints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			u8 port, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	u64 *values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		u64 *stats = (u64 *)&hfi1_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		values[num_dev_cntrs] = hfi1_sps_ints();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		for (i = 1; i < num_driver_cntrs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			values[num_dev_cntrs + i] = stats[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		count = num_dev_cntrs + num_driver_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		count = num_port_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	memcpy(stats->value, values, count * sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static const struct ib_device_ops hfi1_dev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	.driver_id = RDMA_DRIVER_HFI1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	.alloc_hw_stats = alloc_hw_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	.alloc_rdma_netdev = hfi1_vnic_alloc_rn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	.get_dev_fw_str = hfi1_get_dev_fw_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	.get_hw_stats = get_hw_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	.init_port = hfi1_create_port_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	.modify_device = modify_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	/* keep process mad in the driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	.process_mad = hfi1_process_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	.rdma_netdev_get_params = hfi1_ipoib_rn_get_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * hfi1_register_ib_device - register our device with the infiniband core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  * @dd: the device data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  * Return 0 if successful, errno if unsuccessful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) int hfi1_register_ib_device(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	struct hfi1_ibdev *dev = &dd->verbs_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	struct ib_device *ibdev = &dev->rdi.ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	struct hfi1_ibport *ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	for (i = 0; i < dd->num_pports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		init_ibport(ppd + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	/* Only need to initialize non-zero fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	timer_setup(&dev->mem_timer, mem_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	seqlock_init(&dev->iowait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	seqlock_init(&dev->txwait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	INIT_LIST_HEAD(&dev->txwait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	INIT_LIST_HEAD(&dev->memwait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	ret = verbs_txreq_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		goto err_verbs_txreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	/* Use first-port GUID as node guid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	 * The system image GUID is supposed to be the same for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	 * HFIs in a single system but since there can be other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	 * device types in the system, we can't be sure this is unique.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (!ib_hfi1_sys_image_guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		ib_hfi1_sys_image_guid = ibdev->node_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	ibdev->phys_port_cnt = dd->num_pports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	ibdev->dev.parent = &dd->pcidev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	ib_set_device_ops(ibdev, &hfi1_dev_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	strlcpy(ibdev->node_desc, init_utsname()->nodename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		sizeof(ibdev->node_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	 * Fill in rvt info object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	 * Fill in rvt info device attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	hfi1_fill_device_attr(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	/* queue pair */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	dd->verbs_dev.rdi.dparms.qpn_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 						RDMA_CORE_CAP_OPA_AH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 						hfi1_comp_vect_mappings_lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	/* completeion queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	dd->verbs_dev.rdi.dparms.node = dd->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	/* misc settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	dd->verbs_dev.rdi.dparms.reserved_operations = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	/* post send table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	/* opcode translation table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	for (i = 0; i < dd->num_pports; i++, ppd++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		rvt_init_port(&dd->verbs_dev.rdi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			      &ppd->ibport_data.rvp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			      i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			      ppd->pkeys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 				    &ib_hfi1_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	ret = rvt_register_device(&dd->verbs_dev.rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		goto err_verbs_txreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	ret = hfi1_verbs_register_sysfs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		goto err_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) err_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	rvt_unregister_device(&dd->verbs_dev.rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) err_verbs_txreq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	verbs_txreq_exit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	struct hfi1_ibdev *dev = &dd->verbs_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	hfi1_verbs_unregister_sysfs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	rvt_unregister_device(&dd->verbs_dev.rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	if (!list_empty(&dev->txwait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		dd_dev_err(dd, "txwait list not empty!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	if (!list_empty(&dev->memwait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		dd_dev_err(dd, "memwait list not empty!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	del_timer_sync(&dev->mem_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	verbs_txreq_exit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	mutex_lock(&cntr_names_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	kfree(dev_cntr_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	kfree(port_cntr_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	dev_cntr_names = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	port_cntr_names = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	cntr_names_initialized = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	mutex_unlock(&cntr_names_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) void hfi1_cnp_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct ib_header *hdr = packet->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	u32 lqpn, rqpn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	u16 rlid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	u8 sl, sc5, svc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	switch (packet->qp->ibqp.qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		rqpn = qp->remote_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		svc_type = IB_CC_SVCTYPE_UC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		rqpn = qp->remote_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		svc_type = IB_CC_SVCTYPE_RC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	case IB_QPT_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	case IB_QPT_GSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	case IB_QPT_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		svc_type = IB_CC_SVCTYPE_UD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	sl = ibp->sc_to_sl[sc5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	lqpn = qp->ibqp.qp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }