Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/infiniband/hw/hfi1/driver.c (git blame: all lines last touched by commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/*
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
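/*
 * Usage sketch (the mask value below is illustrative only): the mask
 * can be given at load time,
 *   modprobe hfi1 cap_mask=0x4c09a00cb9a
 * and, because the parameter is S_IWUSR, rewritten at runtime via
 *   /sys/module/hfi1/parameters/cap_mask
 * Both paths go through hfi1_caps_set() below, which filters out
 * non-writable and reserved bits before storing the result.
 */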

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

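/*
 * Report the capability mask for sysfs reads: the internal LOCKED bit
 * is masked off, and kernel bits flagged K2U are mirrored into the
 * user half of the mask.
 */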
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

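/* Map an rdmavt device back to the PCI device that owns it. */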
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

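/*
 * Locate a packet's header from its RHF: step back from the RHF to
 * the start of the header queue entry, then forward by the header
 * offset encoded in the RHF itself.
 */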
static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

static inline struct hfi1_16b_header
		*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
				     __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}
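/*
 * Worked example of the encoding above (assuming PAGE_SIZE == 4096):
 * an 8 KiB eager buffer spans two pages, so *encoded becomes
 * ilog2(8192 / 4096) + 1 = 2.
 */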

static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

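/*
 * Seed per-interrupt packet state from the receive context; rsize
 * and maxcnt are kept in 32-bit words, and the first RHF is read at
 * the current software head of the receive header queue.
 */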
static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated as to whether processing of those bits
 * should be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_other_headers *ohdr = pkt->ohdr;
	struct ib_grh *grh = pkt->grh;
	u32 rqpn = 0;
	u16 pkey;
	u32 rlid, slid, dlid = 0;
	u8 hdr_type, sc, svc_type, opcode;
	bool is_mcast = false, ignore_fecn = false, do_cnp = false,
		fecn, becn;

	/* can be called from prescan */
	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(pkt->hdr);
		sc = hfi1_16B_get_sc(pkt->hdr);
		dlid = hfi1_16B_get_dlid(pkt->hdr);
		slid = hfi1_16B_get_slid(pkt->hdr);
		is_mcast = hfi1_is_16B_mcast(dlid);
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_16B;
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
			ppd->lid;
		slid = ib_get_slid(pkt->hdr);
		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_9B;
		fecn = ib_bth_get_fecn(ohdr);
		becn = ib_bth_get_becn(ohdr);
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return false;
	}

	ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
		(opcode == IB_OPCODE_RC_ACKNOWLEDGE);
	/*
	 * ACKNOWLEDGE packets do not get a CNP but this will be
	 * guarded by ignore_fecn above.
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

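/*
 * Cursor state for walking the receive header queue without
 * consuming it; used by the RXQ prescan loop below.
 */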
struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

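/*
 * Snapshot the walk's starting position. With DMA_RTAIL the walk is
 * bounded by the DMA'ed tail; otherwise (and always for the control
 * context) the per-entry RHF sequence count marks the end.
 */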
static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

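/* Has the walk reached the end of the valid entries? */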
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

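/* Advance the walk one entry, wrapping both head and seq count. */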
static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_ibport *ibp = rcd_to_iport(rcd);
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 packet->rcd->rhf_offset;
		struct rvt_qp *qp;
		struct ib_header *hdr;
		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
		hdr = packet->hdr;
		lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &hdr->u.oth;
			packet->grh = NULL;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &hdr->u.l.oth;
			packet->grh = &hdr->u.l.grh;
		} else {
			goto next; /* just in case */
		}

		if (!hfi1_may_ecn(packet))
			goto next;

		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
		qpn = bth1 & RVT_QPN_MASK;
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

		if (!qp) {
			rcu_read_unlock();
			goto next;
		}

		hfi1_process_ecn_slowpath(qp, packet, true);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
		packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

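/*
 * Flush rcd->qp_wait_list: send any NAK a QP deferred while packets
 * were being processed, and reschedule the send engine for QPs that
 * postponed a response send.
 */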
static void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct rvt_qp *qp, *nqp;
	struct hfi1_ctxtdata *rcd = packet->rcd;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}

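/*
 * Reached every MAX_PKT_RECV packets (see check_max_packet()).  In a
 * threaded handler, periodically flush deferred QP work and yield the
 * CPU, then keep receiving; in interrupt context, bump the rcv_limit
 * counter and return RCV_PKT_LIMIT so the caller stops early.
 */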
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}

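/*
 * Fast-path budget check: MAX_PKT_RECV is a power of two, so this is
 * a single mask-and-test on numpkt before taking the slow path above.
 */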
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}

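/*
 * Drop a control-context entry with a stale RHF sequence number:
 * count it, step the queue offset past it, and load the next RHF,
 * honoring the packet budget on the way.
 */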
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->rcd->dd->ctx0_seq_drop++;
	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				     packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

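/*
 * NAPI flavor of the receive loop body.  Unlike process_rcv_packet()
 * below, the eager buffer details are fetched unconditionally (there
 * is no rhf_use_egr_bfr() check) before dispatching the packet
 * through rhf_rcv_function_map.
 */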
static void process_rcv_packet_napi(struct hfi1_packet *packet)
{
	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->etail = rhf_egr_index(packet->rhf);
	packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				  &packet->updegr);
	/*
	 * Prefetch the contents of the eager buffer.  It is
	 * OK to send a negative length to prefetch_range().
	 * The +2 is the size of the RHF.
	 */
	prefetch_range(packet->ebuf,
		       packet->tlen - ((packet->rcd->rcvhdrqentsize -
				       (rhf_hdrq_offset(packet->rhf)
					+ 2)) * 4));

	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
}

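/*
 * Per-packet step for the interrupt/threaded receive paths.  Returns
 * RCV_PKT_OK to keep going, or, every MAX_PKT_RECV packets, whatever
 * check_max_packet() decides: a cond_resched() and RCV_PKT_OK on the
 * threaded path, RCV_PKT_LIMIT otherwise.
 */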
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				 &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
					       (rhf_hdrq_offset(packet->rhf)
						+ 2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet.  We should be
	 * able to trust that etype won't be beyond the range of valid
	 * indexes.  If it is, something is really wrong and we can
	 * probably just let things come crashing down.  There is no need
	 * to eat another comparison in this performance critical code.
	 */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->grh = NULL;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt.
	 */
	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

/*
 * handle_receive_interrupt_napi_fp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for receive interrupt.
 * This is the fast path interrupt handler, used when
 * executing in a NAPI soft IRQ environment.
 */
int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (packet.numpkt < budget) {
		process_rcv_packet_napi(&packet);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			break;

		process_rcv_update(0, &packet);
	}
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return packet.numpkt;
}

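/*
 * Two schemes are used to find the end of the receive header queue.
 * With DMA rtail, the hardware DMAs the tail index to host memory and
 * the driver compares against get_rcvhdrtail().  Without it, the driver
 * relies on the sequence number embedded in each RHF: last_rcv_seq()
 * and hfi1_seq_incr() report a mismatch once the queue is exhausted.
 */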
/*
 * Handle receive interrupts when not using the DMA rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u16 i;

	/*
	 * For dynamically allocated kernel contexts (like vnic) switch
	 * interrupt handler only for that context. Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
		hfi1_rcd_get(rcd);
		hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
			hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
	}
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			rcd->do_interrupt = rcd->slow_handler;

		hfi1_rcd_put(rcd);
	}
}

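/*
 * SC15 is reserved for management traffic, so receipt of any non-SC15
 * packet while the host still thinks the link is ARMED means the
 * neighbor has already gone ACTIVE.  Queue linkstate_active_work to
 * update our link state and return true so packet processing bails
 * until that work has run.
 */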
static bool __set_armed_to_active(struct hfi1_packet *packet)
{
	u8 etype = rhf_rcv_type(packet->rhf);
	u8 sc = SC15_PACKET;

	if (etype == RHF_RCV_TYPE_IB) {
		struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
							   packet->rhf_addr);
		sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	} else if (etype == RHF_RCV_TYPE_BYPASS) {
		struct hfi1_16b_header *hdr = hfi1_get_16B_header(
						packet->rcd,
						packet->rhf_addr);
		sc = hfi1_16B_get_sc(hdr);
	}
	if (sc != SC15_PACKET) {
		int hwstate = driver_lstate(packet->rcd->ppd);
		struct work_struct *lsaw =
				&packet->rcd->ppd->linkstate_active_work;

		if (hwstate != IB_PORT_ACTIVE) {
			dd_dev_info(packet->rcd->dd,
				    "Unexpected link state %s\n",
				    opa_lstate_name(hwstate));
			return false;
		}

		queue_work(packet->rcd->ppd->link_wq, lsaw);
		return true;
	}
	return false;
}

/**
 * set_armed_to_active - the fast path for armed to active
 * @packet: the packet structure
 *
 * Return true if packet processing needs to bail.
 */
static bool set_armed_to_active(struct hfi1_packet *packet)
{
	if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
		return false;
	return __set_armed_to_active(packet);
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	if (!rcd->rcvhdrq)
		return RCV_PKT_OK;
	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!get_dma_rtail_setting(rcd)) {
		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
				skip_pkt = 1;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!get_dma_rtail_setting(rcd)) {
			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
				last = RCV_PKT_DONE;
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				bool lseq;

				lseq = hfi1_seq_incr(rcd,
						     rhf_rcv_seq(packet.rhf));
				if (!last && lseq)
					skip_pkt = 1;
			}
		}

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}
		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * handle_receive_interrupt_napi_sp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler, used when
 * executing in a NAPI soft IRQ environment.
 */
int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_devdata *dd = rcd->dd;
	int last = RCV_PKT_OK;
	bool needset = true;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (last != RCV_PKT_DONE && packet.numpkt < budget) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			process_rcv_packet_napi(&packet);
		}

		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}

		process_rcv_update(last, &packet);
	}

	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return packet.numpkt;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

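/*
 * Inverse of mtu_to_enum(): map an OPA MTU enumeration back to a byte
 * count.  0xffff is returned for unrecognized enum values.
 */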
u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT ||
	    ppd->host_link_state == HLS_UP_ARMED ||
	    ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

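/*
 * Record the LID/LMC pair assigned to this port (typically by the
 * fabric manager) and push the new values into the hardware via
 * hfi1_set_ib_cfg().
 */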
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

	return 0;
}

void shutdown_led_override(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active.
	 */
	smp_rmb();
	if (atomic_read(&ppd->led_override_timer_active)) {
		del_timer_sync(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 0);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}

	/* Hand control of the LED to the DC for normal operation */
	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}

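/*
 * Timer callback for the LED override: bit 0 of led_override_phase
 * selects the "on" or "off" entry of led_override_vals, the external
 * LED is programmed for that phase, and the timer is re-armed with the
 * phase's duration (already in jiffies).
 */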
static void run_led_override(struct timer_list *t)
{
	struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
	struct hfi1_devdata *dd = ppd->dd;
	unsigned long timeout;
	int phase_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	phase_idx = ppd->led_override_phase & 1;

	setextled(dd, phase_idx);

	timeout = ppd->led_override_vals[phase_idx];

	/* Set up for next phase */
	ppd->led_override_phase = !ppd->led_override_phase;

	mod_timer(&ppd->led_override_timer, jiffies + timeout);
}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override().
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{
	if (!(ppd->dd->flags & HFI1_INITTED))
		return;

	/* Convert to jiffies for direct use in timer */
	ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
	ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

	/* Arbitrarily start from LED on phase */
	ppd->led_override_phase = 1;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */
	if (!timer_pending(&ppd->led_override_timer)) {
		timer_setup(&ppd->led_override_timer, run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 1);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
	int ret;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	/* If there are any user/vnic contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	mutex_unlock(&hfi1_mutex);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		shutdown_led_override(ppd);
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			   "Reinitialize unit %u after reset failed with %d\n",
			   unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}

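/*
 * The RHF is written just past the packet header within each rcvhdrq
 * entry, so the header length falls out as the byte distance from the
 * start of the header to rhf_addr.
 */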
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
	packet->hdr = (struct hfi1_ib_message_header *)
			hfi1_get_msgheader(packet->rcd,
					   packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}

static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;

	/* slid and dlid cannot be 0 */
	if ((!packet->slid) || (!packet->dlid))
		return -EINVAL;

	/* Compare port lid with incoming packet dlid */
	if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
	    (packet->dlid !=
		opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
		if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
			return -EINVAL;
	}

	/* No multicast packets with SC15 */
	if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
		return -EINVAL;

	/* Packets with the permissive DLID must always be on SC15 */
	if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
					 16B)) &&
	    (packet->sc != 0xF))
		return -EINVAL;

	return 0;
}

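/*
 * Parse a 9B (IB-format) packet.  The LRH's lnh field indicates whether
 * the BTH follows directly (HFI1_LRH_BTH) or a GRH comes first
 * (HFI1_LRH_GRH); the GRH, when present, is sanity-checked before the
 * commonly used header fields are cached on the packet struct.
 */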
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	struct ib_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	u8 lnh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	hfi1_setup_ib_header(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	hdr = packet->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	lnh = ib_get_lnh(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (lnh == HFI1_LRH_BTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		packet->ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		packet->grh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	} else if (lnh == HFI1_LRH_GRH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		u32 vtf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		packet->ohdr = &hdr->u.l.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		packet->grh = &hdr->u.l.grh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	/* Query commonly used fields from packet header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	packet->payload = packet->ebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	packet->opcode = ib_bth_get_opcode(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	packet->slid = ib_get_slid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	packet->dlid = ib_get_dlid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		     (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 				be16_to_cpu(IB_MULTICAST_LID_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	packet->sl = ib_get_sl(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	packet->pad = ib_bth_get_pad(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	packet->extra_byte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	packet->pkey = ib_bth_get_pkey(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	packet->migrated = ib_bth_is_migration(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
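/*
 * An illustrative restatement of the multicast LID remap above, not
 * part of the driver: a 9B packet carries a 16-bit IB LID whose
 * multicast range begins at IB_MULTICAST_LID_BASE (0xC000), while the
 * driver works internally with 32-bit OPA LIDs, so an IB multicast
 * LID is rebased onto the OPA multicast range. The helper name and
 * the open-coded base constant are assumptions for the example.
 */
static inline u32 example_rebase_9b_mcast(u16 ib_dlid, u32 opa_mcast_base)
{
	return (u32)ib_dlid + opa_mcast_base - 0xC000; /* IB mcast base */
}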
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	 * Bypass packets have a different header/payload split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	 * compared to an IB packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	 * The current split is set such that the first 16 bytes of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	 * header are in the header buffer and the remainder is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	 * the eager buffer. We chose 16 since the hfi1 driver only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	 * supports 16B bypass packets and we will be able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	 * receive the entire LRH with such a split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	struct hfi1_pportdata *ppd = rcd->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	struct hfi1_ibport *ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	u8 l4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	packet->hdr = (struct hfi1_16b_header *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			hfi1_get_16B_header(packet->rcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 					    packet->rhf_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	l4 = hfi1_16B_get_l4(packet->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (l4 == OPA_16B_L4_IB_LOCAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		packet->ohdr = packet->ebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		packet->grh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		packet->opcode = ib_bth_get_opcode(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		/* hdr_len_by_opcode already has an IB LRH factored in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		packet->hlen = hdr_len_by_opcode[packet->opcode] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			(LRH_16B_BYTES - LRH_9B_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		packet->migrated = opa_bth_is_migration(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		u32 vtf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		u8 grh_len = sizeof(struct ib_grh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		packet->ohdr = packet->ebuf + grh_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		packet->grh = packet->ebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		packet->opcode = ib_bth_get_opcode(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		/* hdr_len_by_opcode already has an IB LRH factored in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		packet->hlen = hdr_len_by_opcode[packet->opcode] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		packet->migrated = opa_bth_is_migration(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	} else if (l4 == OPA_16B_L4_FM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		packet->mgmt = packet->ebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		packet->ohdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		packet->grh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		packet->pad = OPA_16B_L4_FM_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		packet->hlen = OPA_16B_L4_FM_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		packet->migrated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	/* Query commonly used fields from packet header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	packet->slid = hfi1_16B_get_slid(packet->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 				opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 					    16B);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	packet->sc = hfi1_16B_get_sc(packet->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	packet->sl = ibp->sc_to_sl[packet->sc];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	packet->extra_byte = SIZE_OF_LT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	packet->pkey = hfi1_16B_get_pkey(packet->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	if (hfi1_bypass_ingress_pkt_check(packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
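/*
 * A sketch of the 16B header/payload split arithmetic used above, not
 * part of the driver: the first LRH_16B_BYTES of the header sit in
 * the header buffer and the remainder of the header opens the eager
 * buffer, so the payload starts (hlen - LRH_16B_BYTES) bytes into the
 * eager buffer. The helper name is hypothetical.
 */
static inline void *example_16b_payload(void *ebuf, u32 hlen, u32 lrh_bytes)
{
	return (u8 *)ebuf + (hlen - lrh_bytes);
}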
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) static void show_eflags_errs(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	u32 rte = rhf_rcv_type_err(packet->rhf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	dd_dev_err(rcd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		   rcd->ctxt, packet->rhf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		   packet->rhf & RHF_DC_ERR ? "dc " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		   packet->rhf & RHF_TID_ERR ? "tid " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		   packet->rhf & RHF_LEN_ERR ? "len " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		   rte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) void handle_eflags(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	rcv_hdrerr(rcd, rcd->ppd, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (rhf_err_flags(packet->rhf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		show_eflags_errs(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	struct napi_struct *napi = rcd->napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	struct hfi1_netdev_rxq *rxq = container_of(napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			struct hfi1_netdev_rxq, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	u32 extra_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	u32 tlen, qpnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	bool do_work, do_cnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	struct hfi1_ipoib_dev_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	trace_hfi1_rcvhdr(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	hfi1_setup_ib_header(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	packet->grh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	if (unlikely(rhf_err_flags(packet->rhf))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		handle_eflags(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	qpnum = ib_bth_get_qpn(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		goto drop_no_nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	trace_ctxt_rsm_hist(rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	/* handle congestion notifications */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	do_work = hfi1_may_ecn(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	if (unlikely(do_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		do_cnp = (packet->opcode != IB_OPCODE_CNP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		(void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 						 packet, do_cnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	 * The split point is after the last byte of the DETH, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	 * strip the pad bytes and the ICRC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	 * tlen is the whole packet length, so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	 * subtract the header size as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	tlen = packet->tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			packet->hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (unlikely(tlen < extra_bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	tlen -= extra_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	priv = hfi1_ipoib_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	skb->dev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	skb->pkt_type = PACKET_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	++netdev->stats.rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) drop_no_nd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	ibp = rcd_to_iport(packet->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	++ibp->rvp.n_pkt_drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
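/*
 * The payload trimming above, restated as a stand-alone sketch (not
 * part of the driver): tlen covers the whole packet, so the BTH pad
 * bytes, the 4-byte ICRC (SIZE_OF_CRC is in dwords, hence the << 2 in
 * the code) and the header length are all subtracted, and anything
 * shorter is malformed and takes the drop path. Hypothetical name.
 */
static inline int example_ipoib_payload_len(u32 tlen, u8 pad, u32 hlen)
{
	u32 extra_bytes = pad + 4 /* ICRC */ + hlen;

	return tlen < extra_bytes ? -EINVAL : (int)(tlen - extra_bytes);
}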
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * The following functions are called by the interrupt handler. They are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * type-specific handlers for each packet type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) static void process_receive_ib(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	if (hfi1_setup_9B_packet(packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	trace_hfi1_rcvhdr(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (unlikely(rhf_err_flags(packet->rhf))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		handle_eflags(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	hfi1_ib_rcv(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static void process_receive_bypass(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	struct hfi1_devdata *dd = packet->rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	if (hfi1_setup_bypass_packet(packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	trace_hfi1_rcvhdr(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (unlikely(rhf_err_flags(packet->rhf))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		handle_eflags(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		hfi1_16B_rcv(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			   "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		if (!(dd->err_info_rcvport.status_and_code &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		      OPA_EI_STATUS_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			u64 *flits = packet->ebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			if (flits && !(packet->rhf & RHF_LEN_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 				dd->err_info_rcvport.packet_flit1 = flits[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 				dd->err_info_rcvport.packet_flit2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 					packet->tlen > sizeof(flits[0]) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 					flits[1] : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			dd->err_info_rcvport.status_and_code |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static void process_receive_error(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	if (unlikely(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		  packet->rhf & RHF_DC_ERR)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	hfi1_setup_ib_header(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	handle_eflags(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	if (unlikely(rhf_err_flags(packet->rhf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		dd_dev_err(packet->rcd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			   "Unhandled error packet received. Dropping.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static void kdeth_process_expected(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	hfi1_setup_9B_packet(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	if (unlikely(rhf_err_flags(packet->rhf))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	hfi1_kdeth_expected_rcv(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) static void kdeth_process_eager(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	hfi1_setup_9B_packet(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	trace_hfi1_rcvhdr(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	if (unlikely(rhf_err_flags(packet->rhf))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		show_eflags_errs(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	hfi1_kdeth_eager_rcv(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) static void process_receive_invalid(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		   rhf_rcv_type(packet->rhf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) #define HFI1_RCVHDR_DUMP_MAX	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	struct hfi1_packet packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	struct ps_mdata mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu  sw head %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		   get_dma_rtail_setting(rcd) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		   "dma_rtail" : "nodma_rtail",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		   RCV_HDR_HEAD_HEAD_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		   rcd->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	init_packet(rcd, &packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	init_ps_mdata(&mdata, &packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 					 rcd->rhf_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		struct ib_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		u64 rhf = rhf_to_cpu(rhf_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		u32 etype = rhf_rcv_type(rhf), qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		u8 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		u32 psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		u8 lnh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		if (ps_done(&mdata, rhf, rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		if (ps_skip(&mdata, rhf, rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		if (etype > RHF_RCV_TYPE_IB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		hdr = packet.hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		lnh = be16_to_cpu(hdr->lrh[0]) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		if (lnh == HFI1_LRH_BTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			packet.ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		else if (lnh == HFI1_LRH_GRH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			packet.ohdr = &hdr->u.l.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			goto next; /* just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			   mdata.ps_head, opcode, qpn, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		update_ps_mdata(&mdata, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
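/*
 * The LNH decode used in the dump loop above, shown in isolation as a
 * sketch (not part of the driver): the low two bits of the first LRH
 * word name the next header, HFI1_LRH_BTH when the BTH follows the
 * LRH directly and HFI1_LRH_GRH when a GRH is interposed. The helper
 * name is hypothetical; the driver open-codes the same expression.
 */
static inline u8 example_get_lnh(struct ib_header *hdr)
{
	return be16_to_cpu(hdr->lrh[0]) & 3;
}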
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	[RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	[RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	[RHF_RCV_TYPE_IB] = process_receive_ib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	[RHF_RCV_TYPE_ERROR] = process_receive_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	[RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	[RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	[RHF_RCV_TYPE_EAGER] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	[RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	[RHF_RCV_TYPE_ERROR] = process_receive_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	[RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) };
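/*
 * A minimal sketch of how these dispatch tables are consumed, not
 * part of the driver: the receive path extracts the packet type from
 * the RHF and calls through whichever table was installed on the
 * receive context. The wrapper name is hypothetical, and the
 * rhf_rcv_function_map field is assumed to hold the active table.
 */
static inline void example_dispatch_rhf(struct hfi1_packet *packet)
{
	const rhf_rcv_function_ptr *map =
		packet->rcd->rhf_rcv_function_map;

	map[rhf_rcv_type(packet->rhf)](packet);
}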