^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright(c) 2015 - 2020 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is provided under a dual BSD/GPLv2 license. When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/xarray.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include <linux/printk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include <linux/hrtimer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <linux/bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <linux/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <rdma/rdma_vt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include "hfi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include "device.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include "common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include "mad.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include "sdma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include "debugfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include "verbs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include "aspm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #include "affinity.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #include "vnic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #include "exp_rcv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #include "netdev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #undef pr_fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define pr_fmt(fmt) DRIVER_NAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * min buffers we want to have per user context, after the driver's contexts are accounted for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define HFI1_MIN_USER_CTXT_BUFCNT 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define NUM_IB_PORTS 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * Number of user receive contexts we are configured to use (to allow for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * pio buffers per ctxt, etc.). The default of -1 means one user context per real (non-HT) CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) int num_user_contexts = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) module_param_named(num_user_contexts, num_user_contexts, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) MODULE_PARM_DESC(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) uint krcvqs[RXE_NUM_DATA_VL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int krcvqsset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* computed based on above array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) unsigned long n_krcvqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) static unsigned hfi1_rcvarr_split = 25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static uint eager_buffer_size = (8 << 20); /* 8MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) module_param(eager_buffer_size, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static uint hfi1_hdrq_entsize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) unsigned int user_credit_return_threshold = 33; /* default is 33% */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) module_param(user_credit_return_threshold, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");
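/*
 * Illustrative load-time configuration (the parameter values below are only
 * an example, not recommendations), assuming the module is loaded as "hfi1":
 *
 *	modprobe hfi1 num_user_contexts=16 krcvqs=2,2,2 rcvarr_split=25 \
 *		eager_buffer_size=8388608 rcvhdrcnt=2048 hdrq_entsize=32
 *
 * The same settings can also be made persistent through a modprobe.d file.
 */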
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) static int hfi1_create_kctxt(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /* Control context has to be always 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) dd_dev_err(dd, "Kernel receive context allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * Set up the kernel context flags here and now because they use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * default values for all receive side memories. User contexts will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * be handled as they are created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) HFI1_CAP_KGET(NODROP_RHQ_FULL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) HFI1_CAP_KGET(NODROP_EGR_FULL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) HFI1_CAP_KGET(DMA_RTAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /* Control context must use DMA_RTAIL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) if (rcd->ctxt == HFI1_CTRL_CTXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) rcd->flags |= HFI1_CAP_DMA_RTAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) rcd->fast_handler = get_dma_rtail_setting(rcd) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) handle_receive_interrupt_dma_rtail :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) handle_receive_interrupt_nodma_rtail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) hfi1_set_seq_cnt(rcd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (!rcd->sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) dd_dev_err(dd, "Kernel send context allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) hfi1_init_ctxt(rcd->sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * Create the receive context array and one or more kernel contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) int hfi1_create_kctxts(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) GFP_KERNEL, dd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (!dd->rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) ret = hfi1_create_kctxt(dd, dd->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) hfi1_free_ctxt(dd->rcd[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /* All the contexts should be freed, free the array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) kfree(dd->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) dd->rcd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * Helper routines for the receive context reference count (rcd and uctxt).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) kref_init(&rcd->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * hfi1_rcd_free - when the reference count reaches zero, clean up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * @kref: pointer to an initialized rcd data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) static void hfi1_rcd_free(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct hfi1_ctxtdata *rcd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) container_of(kref, struct hfi1_ctxtdata, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) rcd->dd->rcd[rcd->ctxt] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) hfi1_free_ctxtdata(rcd->dd, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) kfree(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * hfi1_rcd_put - decrement reference for rcd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * @rcd: pointer to an initialized rcd data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * Use this to put a reference after the init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return kref_put(&rcd->kref, hfi1_rcd_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * hfi1_rcd_get - increment reference for rcd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * @rcd: pointer to an initialized rcd data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * Use this to get a reference after the init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) * Return: reflects kref_get_unless_zero(), which returns non-zero on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * successful increment, otherwise 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) return kref_get_unless_zero(&rcd->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * allocate_rcd_index - allocate an rcd index from the rcd array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * @dd: pointer to a valid devdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * @rcd: rcd data structure to assign
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * @index: pointer to index that is allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Find an empty index in the rcd array, and assign the given rcd to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * If the array is full, -EBUSY is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) static int allocate_rcd_index(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) struct hfi1_ctxtdata *rcd, u16 *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) u16 ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) spin_lock_irqsave(&dd->uctxt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) if (!dd->rcd[ctxt])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) if (ctxt < dd->num_rcv_contexts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) rcd->ctxt = ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) dd->rcd[ctxt] = rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) hfi1_rcd_init(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) spin_unlock_irqrestore(&dd->uctxt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (ctxt >= dd->num_rcv_contexts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) *index = ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * @dd: pointer to a valid devdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * @ctxt: the index of a possible rcd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * ctxt index is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * The caller is responsible for calling the matching _put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) u16 ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (ctxt < dd->num_rcv_contexts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return hfi1_rcd_get_by_index(dd, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * hfi1_rcd_get_by_index - grab a reference to the rcd at the given index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * @dd: pointer to a valid devdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * @ctxt: the index of a possible rcd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) * We need to protect access to the rcd array. If access is needed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * one or more indices, get the protecting spinlock and then increment the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * kref.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) * The caller is responsible for calling the matching _put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) struct hfi1_ctxtdata *rcd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) spin_lock_irqsave(&dd->uctxt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) if (dd->rcd[ctxt]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) rcd = dd->rcd[ctxt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) if (!hfi1_rcd_get(rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) rcd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) spin_unlock_irqrestore(&dd->uctxt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) return rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
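/*
 * Usage sketch for the reference helpers above (illustrative only, not a
 * verbatim caller from this driver):
 *
 *	struct hfi1_ctxtdata *rcd;
 *
 *	rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 *	if (rcd) {
 *		...use the context...
 *		hfi1_rcd_put(rcd);
 *	}
 *
 * Every reference taken by a lookup must be balanced with hfi1_rcd_put();
 * the final put (hfi1_free_ctxt()) drops the kref_init() reference and
 * ends in hfi1_rcd_free().
 */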
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * Common code for user and kernel context create and setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * NOTE: the initial kref is done here (hfi1_rcd_init()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct hfi1_ctxtdata **context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) unsigned kctxt_ngroups = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) u32 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (dd->rcv_entries.nctxt_extra >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (rcd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) u32 rcvtids, max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) u16 ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) ret = allocate_rcd_index(dd, rcd, &ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) *context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) kfree(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) INIT_LIST_HEAD(&rcd->qp_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) hfi1_exp_tid_group_init(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) rcd->ppd = ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) rcd->dd = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) rcd->numa_id = numa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) rcd->rcv_array_groups = dd->rcv_entries.ngroups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) rcd->slow_handler = handle_receive_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) rcd->do_interrupt = rcd->slow_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) mutex_init(&rcd->exp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) spin_lock_init(&rcd->exp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * Calculate the context's RcvArray entry starting point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * We do this here because we have to take into account all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * the RcvArray entries that previous contexts would have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * taken and we have to account for any extra groups assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * to the static (kernel) or dynamic (vnic/user) contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (ctxt < dd->first_dyn_alloc_ctxt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) if (ctxt < kctxt_ngroups) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) base = ctxt * (dd->rcv_entries.ngroups + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) rcd->rcv_array_groups++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) base = kctxt_ngroups +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) (ctxt * dd->rcv_entries.ngroups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) u16 ct = ctxt - dd->first_dyn_alloc_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) kctxt_ngroups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (ct < dd->rcv_entries.nctxt_extra) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) base += ct * (dd->rcv_entries.ngroups + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) rcd->rcv_array_groups++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) base += dd->rcv_entries.nctxt_extra +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) (ct * dd->rcv_entries.ngroups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) rcd->eager_base = base * dd->rcv_entries.group_size;
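		/*
		 * Worked example with hypothetical sizing (numbers chosen for
		 * illustration, not taken from hardware): with
		 * rcv_entries.ngroups = 64, rcv_entries.group_size = 8 and no
		 * extra groups (kctxt_ngroups = 0), kernel context 2 gets
		 * base = 0 + 2 * 64 = 128 groups, i.e.
		 * eager_base = 128 * 8 = 1024 RcvArray entries.
		 */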
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) rcd->rcvhdrq_cnt = rcvhdrcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) rcd->rhf_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
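		/*
		 * With the default hdrq_entsize of 32 (32-bit words, i.e.
		 * 128B entries) this works out to rhf_offset = 32 - 2 = 30
		 * dwords, placing the 8B RHF at the end of each entry.
		 */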
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * Simple Eager buffer allocation: we have already pre-allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * the number of RcvArray entry groups. Each ctxtdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * holds the number of groups for that context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * To follow CSR requirements and maintain cacheline alignment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * make sure all sizes and bases are multiples of group_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * The expected entry count is what is left after assigning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * eager.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) max_entries = rcd->rcv_array_groups *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) dd->rcv_entries.group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) rcd->egrbufs.count = round_down(rcvtids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) dd->rcv_entries.group_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) rcd->egrbufs.count = MAX_EAGER_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) hfi1_cdbg(PROC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) "ctxt%u: max Eager buffer RcvArray entries: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) rcd->ctxt, rcd->egrbufs.count);
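		/*
		 * Continuing the hypothetical sizing above: with
		 * rcv_array_groups = 64 and group_size = 8,
		 * max_entries = 64 * 8 = 512; at the default 25% split,
		 * rcvtids = 512 * 25 / 100 = 128, already a multiple of the
		 * group size, so egrbufs.count = 128.
		 */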
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * Allocate array that will hold the eager buffer accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * This will allocate the maximum possible buffer count based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * on the value of the RcvArray split parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * The resulting value will be rounded down to the closest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * multiple of dd->rcv_entries.group_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) rcd->egrbufs.buffers =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) kcalloc_node(rcd->egrbufs.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) sizeof(*rcd->egrbufs.buffers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) GFP_KERNEL, numa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (!rcd->egrbufs.buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) rcd->egrbufs.rcvtids =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) kcalloc_node(rcd->egrbufs.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) sizeof(*rcd->egrbufs.rcvtids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) GFP_KERNEL, numa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) if (!rcd->egrbufs.rcvtids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) rcd->egrbufs.size = eager_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * The size of the buffers programmed into the RcvArray
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * entries needs to be big enough to handle the highest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * MTU supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if (rcd->egrbufs.size < hfi1_max_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) hfi1_cdbg(PROC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) "ctxt%u: eager bufs size too small. Adjusting to %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) rcd->ctxt, rcd->egrbufs.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /* Applicable only for statically created kernel contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (ctxt < dd->first_dyn_alloc_ctxt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) GFP_KERNEL, numa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) if (!rcd->opstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) /* Initialize TID flow generations for the context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) hfi1_kern_init_ctxt_generations(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) *context = rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) *context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) hfi1_free_ctxt(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * hfi1_free_ctxt - free the context allocated by hfi1_create_ctxtdata()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * @rcd: pointer to an initialized rcd data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * This wrapper is the free function that matches hfi1_create_ctxtdata().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * When a context is done being used (kernel or user), this function is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * for the "final" put to match the kref init from hfi1_create_ctxtdata().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * Other users of the context do a get/put sequence to make sure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * structure isn't removed while in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * Select the largest ccti value over all SLs to determine the intra-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * packet gap for the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * called with cca_timer_lock held (to protect access to cca_timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * array), and rcu_read_lock() (to protect access to cc_state).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) void set_link_ipg(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) u16 cce, ccti_limit, max_ccti = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) u16 shift, mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) u64 src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	u32 current_egress_rate; /* Mbits/sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) u32 max_pkt_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * max_pkt_time is the maximum packet egress time in units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * of the fabric clock period 1/(805 MHz).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) cc_state = get_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (!cc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * This should _never_ happen - rcu_read_lock() is held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * and set_link_ipg() should not be called if cc_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) for (i = 0; i < OPA_MAX_SLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) u16 ccti = ppd->cca_timer[i].ccti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) if (ccti > max_ccti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) max_ccti = ccti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) ccti_limit = cc_state->cct.ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) if (max_ccti > ccti_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) max_ccti = ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) cce = cc_state->cct.entries[max_ccti].entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) shift = (cce & 0xc000) >> 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) mult = (cce & 0x3fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) current_egress_rate = active_egress_rate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) src = (max_pkt_time >> shift) * mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
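/*
 * Worked example for the CCT entry decoding in set_link_ipg() above
 * (numbers are hypothetical): an entry of 0x4005 gives
 * shift = (0x4005 & 0xc000) >> 14 = 1 and mult = 0x4005 & 0x3fff = 5, so
 * with max_pkt_time = 1000 fabric-clock cycles the reload value becomes
 * src = (1000 >> 1) * 5 = 2500 before it is masked and shifted into
 * SEND_STATIC_RATE_CONTROL.
 */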
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) struct cca_timer *cca_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) int sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) u16 ccti_timer, ccti_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) enum hrtimer_restart ret = HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) cca_timer = container_of(t, struct cca_timer, hrtimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) ppd = cca_timer->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) sl = cca_timer->sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) cc_state = get_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (!cc_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * 1) decrement ccti for SL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * 2) calculate IPG for link (set_link_ipg())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * 3) restart timer, unless ccti is at min value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) spin_lock_irqsave(&ppd->cca_timer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) if (cca_timer->ccti > ccti_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) cca_timer->ccti--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) set_link_ipg(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (cca_timer->ccti > ccti_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) unsigned long nsec = 1024 * ccti_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* ccti_timer is in units of 1.024 usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) hrtimer_forward_now(t, ns_to_ktime(nsec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) ret = HRTIMER_RESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
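/*
 * Re-arm period sketch for cca_timer_fn() (illustrative): ccti_timer is in
 * units of 1.024 usec and the timer is forwarded by nsec = 1024 * ccti_timer,
 * so e.g. ccti_timer = 100 yields a decrement period of 102400 ns (~102.4 us).
 */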
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * Common code for initializing the physical port structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) uint default_pkey_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) ppd->dd = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) ppd->hw_pidx = hw_pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) ppd->port = port; /* IB port number, not index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) ppd->prev_link_width = LINK_WIDTH_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * There are C_VL_COUNT number of PortVLXmitWait counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) for (i = 0; i < C_VL_COUNT + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) ppd->port_vl_xmit_wait_last[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) ppd->vl_xmit_flit_cnt[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) default_pkey_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) ppd->pkeys[0] = 0x8001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) INIT_WORK(&ppd->link_up_work, handle_link_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) INIT_WORK(&ppd->link_down_work, handle_link_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) INIT_WORK(&ppd->freeze_work, handle_freeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) INIT_WORK(&ppd->sma_message_work, handle_sma_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) mutex_init(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) spin_lock_init(&ppd->qsfp_info.qsfp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ppd->qsfp_info.ppd = ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) ppd->sm_trap_qp = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) ppd->sa_qp = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) ppd->hfi1_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) spin_lock_init(&ppd->cca_timer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) for (i = 0; i < OPA_MAX_SLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) ppd->cca_timer[i].ppd = ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) ppd->cca_timer[i].sl = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) ppd->cca_timer[i].ccti = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) spin_lock_init(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) spin_lock_init(&ppd->cc_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) RCU_INIT_POINTER(ppd->cc_state, cc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (!cc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * Do initialization for device that is only needed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * first detect, not on resets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static int loadtime_init(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * init_after_reset - re-initialize after a reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * Sanity check at least some of the values after reset, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * ensure no receive or transmit is happening (explicitly, in case the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * reset failed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static int init_after_reset(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * Ensure chip does no sends or receives, tail updates, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * pioavail updates while we re-initialize. This is mostly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * for the driver data structures, not chip registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) for (i = 0; i < dd->num_rcv_contexts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) rcd = hfi1_rcd_get_by_index(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) HFI1_RCVCTRL_INTRAVAIL_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) HFI1_RCVCTRL_TAILUPD_DIS, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) pio_send_control(dd, PSC_GLOBAL_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) for (i = 0; i < dd->num_send_contexts; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) sc_disable(dd->send_contexts[i].sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) static void enable_chip(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) u32 rcvmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* enable PIO send */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) pio_send_control(dd, PSC_GLOBAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Enable kernel ctxts' receive and receive interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * Other ctxts done as user opens and initializes them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) rcd = hfi1_rcd_get_by_index(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (!rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (HFI1_CAP_IS_KSET(TID_RDMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) hfi1_rcvctrl(dd, rcvmask, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) sc_enable(rcd->sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
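/*
 * For example (assuming a configuration where DMA_RTAIL, NODROP_RHQ_FULL and
 * NODROP_EGR_FULL are off, MULTI_PKT_EGR is on and TID_RDMA is enabled), the
 * loop above would program a non-control kernel context with
 * rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB |
 *	     HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_TIDFLOW_ENB.
 */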
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * create_workqueues - create per port workqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static int create_workqueues(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) int pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (!ppd->hfi1_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ppd->hfi1_wq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) alloc_workqueue(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) "hfi%d_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) WQ_MEM_RECLAIM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dd->unit, pidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!ppd->hfi1_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) goto wq_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (!ppd->link_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Make the link workqueue single-threaded to enforce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * serialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ppd->link_wq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) alloc_workqueue(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) "hfi_link_%d_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 1, /* max_active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) dd->unit, pidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (!ppd->link_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) goto wq_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) wq_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
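	/* unwind: destroy any workqueues already created for other ports */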
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (ppd->hfi1_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) destroy_workqueue(ppd->hfi1_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ppd->hfi1_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (ppd->link_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) destroy_workqueue(ppd->link_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ppd->link_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /**
 * destroy_workqueues - destroy per-port workqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static void destroy_workqueues(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (ppd->hfi1_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) destroy_workqueue(ppd->hfi1_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ppd->hfi1_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (ppd->link_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) destroy_workqueue(ppd->link_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ppd->link_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * enable_general_intr() - Enable the IRQs that will be handled by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * general interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * @dd: valid devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static void enable_general_intr(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * hfi1_init - do the actual initialization sequence on the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * @reinit: re-initializing, so don't allocate new memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Do the actual initialization sequence on the chip. This is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * both from the init routine called from the PCI infrastructure, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * when we reset the chip, or detect that it was reset internally,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * or it's administratively re-enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Memory allocation here and in called routines is only done in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * the first case (reinit == 0). We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int hfi1_init(struct hfi1_devdata *dd, int reinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int ret = 0, pidx, lastfail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) unsigned long len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
	/* Set up low-level send handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dd->process_pio_send = hfi1_verbs_send_pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dd->process_dma_send = hfi1_verbs_send_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) dd->pio_inline_send = pio_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) dd->process_vnic_dma_send = hfi1_vnic_send_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) atomic_set(&dd->drop_packet, DROP_PACKET_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) dd->do_drop = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dd->do_drop = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* make sure the link is not "up" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ppd->linkup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (reinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ret = init_after_reset(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) ret = loadtime_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* dd->rcd can be NULL if early initialization failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
		 * re-init, the simplest way to handle this is to free the
		 * existing buffers and re-allocate them.
		 * The rest of ctxt 0's ctxtdata needs to be re-created as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) rcd = hfi1_rcd_get_by_index(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) lastfail = hfi1_create_rcvhdrq(dd, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (!lastfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) lastfail = hfi1_setup_eagerbufs(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (!lastfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (lastfail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ret = lastfail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
		/* done with this context; drop the reference taken above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* Allocate enough memory for user event notification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) sizeof(*dd->events));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) dd->events = vmalloc_user(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (!dd->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dd_dev_err(dd, "Failed to allocate user events page\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * Allocate a page for device and port status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Page will be shared amongst all user processes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dd->status = vmalloc_user(PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!dd->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) dd_dev_err(dd, "Failed to allocate dev status page\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (dd->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* Currently, we only have one port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ppd->statusp = &dd->status->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) set_mtu(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
	/* enable the chip even if we have an error, so we can debug the cause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) enable_chip(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * Set status even if port serdes is not initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * so that diags will work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (dd->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) HFI1_STATUS_INITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* enable all interrupts from the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) enable_general_intr(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) init_qsfp_int(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* chip is OK for user apps; mark it as initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * start the serdes - must be after interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * enabled so we are notified when the link goes up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) lastfail = bringup_serdes(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (lastfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) "Failed to bring up port %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ppd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Set status even if port serdes is not initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * so that diags will work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (ppd->statusp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) HFI1_STATUS_INITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!ppd->link_speed_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* if ret is non-zero, we probably should do some cleanup here... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
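/* Return the devdata previously registered for @unit, or NULL if there is none. */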
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct hfi1_devdata *hfi1_lookup(int unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return xa_load(&hfi1_dev_table, unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * Stop the timers during unit shutdown, or after an error late
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * in initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void stop_timers(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ppd->led_override_timer.function) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) del_timer_sync(&ppd->led_override_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) atomic_set(&ppd->led_override_timer_active, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * shutdown_device - shut down a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * This is called to make the device quiet when we are about to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * unload the driver, and also when the device is administratively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * disabled. It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void shutdown_device(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) unsigned pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (dd->flags & HFI1_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dd->flags |= HFI1_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
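	/* take all ports "down" and clear the user-visible link status bits */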
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ppd->linkup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ppd->statusp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) HFI1_STATUS_IB_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) dd->flags &= ~HFI1_INITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* mask and clean up interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) msix_clean_up_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) for (i = 0; i < dd->num_rcv_contexts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) rcd = hfi1_rcd_get_by_index(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) HFI1_RCVCTRL_CTXT_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) HFI1_RCVCTRL_INTRAVAIL_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) HFI1_RCVCTRL_PKEY_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * Gracefully stop all sends allowing any in progress to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * trickle out first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) for (i = 0; i < dd->num_send_contexts; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) sc_flush(dd->send_contexts[i].sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
	 * Wait long enough for anything that's going to trickle out to have
	 * actually done so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* disable all contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) for (i = 0; i < dd->num_send_contexts; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) sc_disable(dd->send_contexts[i].sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* disable the send device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) pio_send_control(dd, PSC_GLOBAL_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) shutdown_led_override(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * Clear SerdesEnable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * We can't count on interrupts since we are stopping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) hfi1_quiet_serdes(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (ppd->hfi1_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) flush_workqueue(ppd->hfi1_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (ppd->link_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) flush_workqueue(ppd->link_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) sdma_exit(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * hfi1_free_ctxtdata - free a context's allocated data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * @rcd: the ctxtdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) *
 * Free up any allocated data for a context. This should never change
 * any chip state or global driver state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) u32 e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (!rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (rcd->rcvhdrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) rcd->rcvhdrq, rcd->rcvhdrq_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) rcd->rcvhdrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (hfi1_rcvhdrtail_kvaddr(rcd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) (void *)hfi1_rcvhdrtail_kvaddr(rcd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) rcd->rcvhdrqtailaddr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) rcd->rcvhdrtail_kvaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* all the RcvArray entries should have been cleared by now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) kfree(rcd->egrbufs.rcvtids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) rcd->egrbufs.rcvtids = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) for (e = 0; e < rcd->egrbufs.alloced; e++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (rcd->egrbufs.buffers[e].addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dma_free_coherent(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) rcd->egrbufs.buffers[e].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) rcd->egrbufs.buffers[e].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) rcd->egrbufs.buffers[e].dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) kfree(rcd->egrbufs.buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) rcd->egrbufs.alloced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) rcd->egrbufs.buffers = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) sc_free(rcd->sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) rcd->sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) vfree(rcd->subctxt_uregbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) vfree(rcd->subctxt_rcvegrbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) vfree(rcd->subctxt_rcvhdr_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) kfree(rcd->opstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) rcd->subctxt_uregbase = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) rcd->subctxt_rcvegrbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) rcd->subctxt_rcvhdr_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) rcd->opstats = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /*
 * Release our hold on the shared ASIC data. If we are the last one,
 * return the structure to be finalized outside the lock. The caller
 * must hold the hfi1_dev_table lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct hfi1_asic_data *ad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int other;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (!dd->asic_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) dd->asic_data->dds[dd->hfi1_id] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) other = dd->hfi1_id ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ad = dd->asic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) dd->asic_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* return NULL if the other dd still has a link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return ad->dds[other] ? NULL : ad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
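/* Final teardown of the shared ASIC data: clean up the i2c buses and free it. */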
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static void finalize_asic_data(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct hfi1_asic_data *ad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) clean_up_i2c(dd, ad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) kfree(ad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * hfi1_free_devdata - cleans up and frees per-unit data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * @dd: pointer to a valid devdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) *
 * It cleans up and frees all data structures set up by
 * hfi1_alloc_devdata().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) void hfi1_free_devdata(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct hfi1_asic_data *ad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) xa_lock_irqsave(&hfi1_dev_table, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) __xa_erase(&hfi1_dev_table, dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ad = release_asic_data(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) xa_unlock_irqrestore(&hfi1_dev_table, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) finalize_asic_data(dd, ad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) free_platform_config(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) rcu_barrier(); /* wait for rcu callbacks to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) free_percpu(dd->int_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) free_percpu(dd->rcv_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) free_percpu(dd->send_schedule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) free_percpu(dd->tx_opstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) dd->int_counter = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dd->rcv_limit = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) dd->send_schedule = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) dd->tx_opstats = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) kfree(dd->comp_vect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) dd->comp_vect = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (dd->rcvhdrtail_dummy_kvaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) (void *)dd->rcvhdrtail_dummy_kvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) dd->rcvhdrtail_dummy_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) dd->rcvhdrtail_dummy_kvaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) sdma_clean(dd, dd->num_sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rvt_dealloc_device(&dd->verbs_dev.rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * @pdev: Valid PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @extra: How many bytes to alloc past the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
 * Must be done via the verbs allocator, because the verbs cleanup process
 * performs both the cleanup and the free of the data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * "extra" is for chip-specific data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) size_t extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) int ret, nports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
	/* "extra" is sizeof(struct hfi1_pportdata) * number of ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) nports = extra / sizeof(struct hfi1_pportdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) nports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (!dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) dd->num_pports = nports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) dd->pport = (struct hfi1_pportdata *)(dd + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) dd->pcidev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) pci_set_drvdata(pdev, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) "Could not allocate unit ID: error %d\n", -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * If the BIOS does not have the NUMA node information set, select
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * NUMA 0 so we get consistent performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dd->node = pcibus_to_node(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (dd->node == NUMA_NO_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dd->node = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * Initialize all locks for the device. This needs to be as early as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * possible so locks are usable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) spin_lock_init(&dd->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) spin_lock_init(&dd->sendctrl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) spin_lock_init(&dd->rcvctrl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) spin_lock_init(&dd->uctxt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) spin_lock_init(&dd->hfi1_diag_trans_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) spin_lock_init(&dd->sc_init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) spin_lock_init(&dd->dc8051_memlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) seqlock_init(&dd->sc2vl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) spin_lock_init(&dd->sde_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) spin_lock_init(&dd->pio_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mutex_init(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) init_waitqueue_head(&dd->event_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) spin_lock_init(&dd->irq_src_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
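	/* allocate the per-CPU counters and statistics */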
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) dd->int_counter = alloc_percpu(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!dd->int_counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) dd->rcv_limit = alloc_percpu(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!dd->rcv_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dd->send_schedule = alloc_percpu(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (!dd->send_schedule) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!dd->tx_opstats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!dd->comp_vect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* allocate dummy tail memory for all receive contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dd->rcvhdrtail_dummy_kvaddr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!dd->rcvhdrtail_dummy_kvaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) atomic_set(&dd->ipoib_rsm_usr_num, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) hfi1_free_devdata(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * Called from freeze mode handlers, and from PCI error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * reporting code. Should be paranoid about state of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * system and data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) void hfi1_disable_after_error(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (dd->flags & HFI1_INITTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) u32 pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) dd->flags &= ~HFI1_INITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (dd->pport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (dd->flags & HFI1_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) set_link_state(ppd, HLS_DN_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (ppd->statusp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) *ppd->statusp &= ~HFI1_STATUS_IB_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /*
	 * Mark the unit as having had an error, both for the driver and
	 * for /sys and the status word mapped to user programs.
	 * This marks the unit as not usable until it is reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (dd->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dd->status->dev |= HFI1_STATUS_HWERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
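/* PCI driver entry points, defined later in this file */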
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static void remove_one(struct pci_dev *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static int init_one(struct pci_dev *, const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static void shutdown_one(struct pci_dev *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) #define PFX DRIVER_NAME ": "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
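/* PCI device IDs that this driver supports */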
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) const struct pci_device_id hfi1_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) { 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) static struct pci_driver hfi1_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) .probe = init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .remove = remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .shutdown = shutdown_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) .id_table = hfi1_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) .err_handler = &hfi1_pci_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
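/* Sum the entries of the krcvqs[] module parameter into the global n_krcvqs. */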
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static void __init compute_krcvqs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) for (i = 0; i < krcvqsset; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) n_krcvqs += krcvqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * Do all the generic driver unit- and chip-independent memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * allocation and initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static int __init hfi1_mod_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ret = dev_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) ret = node_affinity_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* validate max MTU before any devices start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (!valid_opa_max_mtu(hfi1_max_mtu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* valid CUs run from 1-128 in powers of 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) hfi1_cu = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* valid credit return threshold is 0-100, variable is unsigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (user_credit_return_threshold > 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) user_credit_return_threshold = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) compute_krcvqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /*
	 * Sanitize the receive interrupt count here; the receive interrupt
	 * timeout must wait until after the hardware type is known.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /* reject invalid combinations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) rcv_intr_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * Avoid indefinite packet delivery by requiring a timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * if count is > 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) rcv_intr_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * The dynamic algorithm expects a non-zero timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * and a count > 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) rcv_intr_dynamic = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* sanitize link CRC options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) link_crc_mask &= SUPPORTED_CRCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ret = opfn_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (ret < 0) {
		pr_err("Failed to allocate opfn_wq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) goto bail_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * These must be called before the driver is registered with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * the PCI subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) hfi1_dbg_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ret = pci_register_driver(&hfi1_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) pr_err("Unable to register driver: error %d\n", -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) goto bail_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) goto bail; /* all OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) bail_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) hfi1_dbg_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) dev_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) module_init(hfi1_mod_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * Do the non-unit driver cleanup, memory free, etc. at unload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void __exit hfi1_mod_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) pci_unregister_driver(&hfi1_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) opfn_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) node_affinity_destroy_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) hfi1_dbg_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) WARN_ON(!xa_empty(&hfi1_dev_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dispose_firmware(); /* asymmetric with obtain_firmware() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) dev_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) module_exit(hfi1_mod_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /* this can only be called after a successful initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static void cleanup_device_data(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
	/* users can't do anything more with the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct hfi1_pportdata *ppd = &dd->pport[pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (ppd->statusp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) for (i = 0; i < OPA_MAX_SLS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) spin_lock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) cc_state = get_cc_state_protected(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) RCU_INIT_POINTER(ppd->cc_state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) spin_unlock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (cc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) kfree_rcu(cc_state, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) free_credit_return(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * Free any resources still in use (usually just kernel contexts)
 * at unload; we loop over num_rcv_contexts, because that's what we allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (rcd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) hfi1_free_ctxt_rcv_groups(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) hfi1_free_ctxt(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) kfree(dd->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) dd->rcd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) free_pio_map(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /* must follow rcv context free - need to remove rcv's hooks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) sc_free(dd->send_contexts[ctxt].sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) dd->num_send_contexts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) kfree(dd->send_contexts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) dd->send_contexts = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) kfree(dd->hw_to_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) dd->hw_to_sw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) kfree(dd->boardname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) vfree(dd->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) vfree(dd->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * Clean up on unit shutdown, or error during unit load after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * successful initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static void postinit_cleanup(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) hfi1_start_cleanup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) hfi1_comp_vectors_clean_up(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) hfi1_dev_affinity_clean_up(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) hfi1_pcie_ddcleanup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) hfi1_pcie_cleanup(dd->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) cleanup_device_data(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) hfi1_free_devdata(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) int ret = 0, j, pidx, initfail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* First, lock the non-writable module parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) HFI1_CAP_LOCK();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /* Validate dev ids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ent->device == PCI_DEVICE_ID_INTEL1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ent->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /* Allocate the dd so we can get to work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) sizeof(struct hfi1_pportdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (IS_ERR(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) ret = PTR_ERR(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /* Validate some global module parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /* use the encoding function as a sanitization check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) hfi1_hdrq_entsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /* The receive eager buffer size must be set before the receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * contexts are created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * Set the eager buffer size. Validate that it falls in a range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * allowed by the hardware - all powers of 2 between the min and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * max. The maximum valid MTU is within the eager buffer range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * so we do not need to cap the max_mtu by an eager buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) */
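/*
 * For example, a hypothetical eager_buffer_size of 3000000 would be
 * rounded up to 4194304 and then clamped into the
 * [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL] range below.
 */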
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (eager_buffer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (!is_power_of_2(eager_buffer_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) eager_buffer_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) roundup_pow_of_two(eager_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) eager_buffer_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) clamp_val(eager_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) MIN_EAGER_BUFFER * 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) MAX_EAGER_BUFFER_TOTAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) dd_dev_info(dd, "Eager buffer size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) eager_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /* restrict value of hfi1_rcvarr_split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) ret = hfi1_pcie_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * Do device-specific initialization, function table setup, dd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * allocation, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) ret = hfi1_init_dd(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) goto clean_bail; /* error already printed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) ret = create_workqueues(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) goto clean_bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) /* do the generic initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) initfail = hfi1_init(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = hfi1_register_ib_device(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) * Now ready for use. This should be cleared whenever we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * detect a reset, or initiate one. If an earlier step failed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * we still create the devices, so diags, etc. can be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) * to determine the cause of the problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (!initfail && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) dd->flags |= HFI1_INITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* create debugfs files after init and ib register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) hfi1_dbg_ibdev_init(&dd->verbs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) j = hfi1_device_create(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
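/*
 * Error unwind: quiesce interrupts, timers and workqueues first, then
 * tear down only the pieces that were actually set up above.
 */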
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (initfail || ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) msix_clean_up_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) stop_timers(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) flush_workqueue(ib_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) for (pidx = 0; pidx < dd->num_pports; ++pidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) hfi1_quiet_serdes(dd->pport + pidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) ppd = dd->pport + pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (ppd->hfi1_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) destroy_workqueue(ppd->hfi1_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) ppd->hfi1_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (ppd->link_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) destroy_workqueue(ppd->link_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ppd->link_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
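/* only undo the steps that actually succeeded */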
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (!j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) hfi1_device_remove(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) hfi1_unregister_ib_device(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) postinit_cleanup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (initfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) ret = initfail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) goto bail; /* everything already cleaned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) sdma_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) clean_bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) hfi1_pcie_cleanup(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static void wait_for_clients(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * Remove the device init value and complete the device if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * no clients, or wait for active clients to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (atomic_dec_and_test(&dd->user_refcount))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) complete(&dd->user_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
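/*
 * user_refcount presumably starts at one for the driver itself and is
 * bumped per open client, so the completion fires only after the last
 * client reference is dropped.
 */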
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) wait_for_completion(&dd->user_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static void remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct hfi1_devdata *dd = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /* close debugfs files before ib unregister */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) hfi1_dbg_ibdev_exit(&dd->verbs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /* remove the /dev hfi1 interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) hfi1_device_remove(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /* wait for existing user space clients to finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) wait_for_clients(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /* unregister from IB core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) hfi1_unregister_ib_device(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* free netdev data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) hfi1_netdev_free(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * Disable the IB link, disable interrupts on the device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * clear dma engines, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) shutdown_device(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) destroy_workqueues(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) stop_timers(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /* wait until all of our (qsfp) queue_work() calls complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) flush_workqueue(ib_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) postinit_cleanup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) static void shutdown_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct hfi1_devdata *dd = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) shutdown_device(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * hfi1_create_rcvhdrq - create a receive header queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * @rcd: the context data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * This must be contiguous memory (from an i/o perspective), and must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * DMA'able (which means for some systems, it will go through an IOMMU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * or be forced into a low address range).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) unsigned amt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (!rcd->rcvhdrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) gfp_t gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) amt = rcvhdrq_size(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
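/*
 * Kernel and VNIC contexts use GFP_KERNEL; dynamically allocated
 * user contexts use GFP_USER, presumably so the allocation honors
 * the opening process's cpuset limits.
 */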
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) gfp_flags = GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) gfp_flags = GFP_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) &rcd->rcvhdrq_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) gfp_flags | __GFP_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (!rcd->rcvhdrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) "attempt to allocate %u bytes for ctxt %u rcvhdrq failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) amt, rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
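/*
 * A separate DMA page for the hardware-updated receive header tail
 * is only needed when DMA_RTAIL is enabled for this context.
 */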
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) &rcd->rcvhdrqtailaddr_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (!rcd->rcvhdrtail_kvaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) goto bail_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) rcd->rcvhdrq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) bail_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) rcd->rcvhdrq_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) rcd->rcvhdrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * @rcd: the context we are setting up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * Allocate the eager TID buffers and program them into the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * They are no longer completely contiguous; we do multiple allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * calls.  Otherwise we would get the OOM code involved by asking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * too much per call, with disastrous results on some kernels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct hfi1_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) u32 max_entries, egrtop, alloced_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) gfp_t gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) u16 order, idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * GFP_USER, but without GFP_FS, so the buffer cache can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * coalesced (we hope); otherwise, even at order 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * heavy filesystem activity makes these allocations fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * __GFP_COMP lets us use compound pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * The minimum size of the eager buffers is a group of MTU-sized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * The global eager_buffer_size parameter is checked against the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * theoretical lower limit of the value. Here, we check against the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * MTU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * If using one-pkt-per-egr-buffer, lower the eager buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * size to the max MTU (page-aligned).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) rcd->egrbufs.rcvtid_size = round_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * Eager buffers sizes of 1MB or less require smaller TID sizes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * to satisfy the "multiple of 8 RcvArray entries" requirement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (rcd->egrbufs.size <= (1 << 20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) rounddown_pow_of_two(rcd->egrbufs.size / 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
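/*
 * Allocate eager buffers in rcvtid_size chunks until the requested
 * total size is covered.  On an allocation failure, drop to half the
 * chunk size and re-slice what has already been allocated (see
 * below) rather than failing outright.
 */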
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) while (alloced_bytes < rcd->egrbufs.size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) rcd->egrbufs.alloced < rcd->egrbufs.count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) rcd->egrbufs.buffers[idx].addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) dma_alloc_coherent(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) rcd->egrbufs.rcvtid_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) &rcd->egrbufs.buffers[idx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (rcd->egrbufs.buffers[idx].addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) rcd->egrbufs.buffers[idx].len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) rcd->egrbufs.rcvtid_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) rcd->egrbufs.buffers[idx].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) rcd->egrbufs.buffers[idx].dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) rcd->egrbufs.alloced++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) alloced_bytes += rcd->egrbufs.rcvtid_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) u32 new_size, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) u64 offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * Fail the eager buffer allocation if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * - we are already using the lowest acceptable size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * - we are using one-pkt-per-egr-buffer (this implies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * that we are accepting only one size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (rcd->egrbufs.rcvtid_size == round_mtu ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) goto bail_rcvegrbuf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) new_size = rcd->egrbufs.rcvtid_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * If the first attempt to allocate memory failed, don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * fail everything but continue with the next lower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (idx == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) rcd->egrbufs.rcvtid_size = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * Re-partition already allocated buffers to a smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) */
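/*
 * e.g. two 64 KB buffers re-sliced at a new_size of 32 KB become
 * four rcvtids entries, two per underlying buffer.
 */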
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) rcd->egrbufs.alloced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) for (i = 0, j = 0, offset = 0; j < idx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (i >= rcd->egrbufs.count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) rcd->egrbufs.rcvtids[i].dma =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) rcd->egrbufs.buffers[j].dma + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) rcd->egrbufs.rcvtids[i].addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) rcd->egrbufs.buffers[j].addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) rcd->egrbufs.alloced++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if ((rcd->egrbufs.buffers[j].dma + offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) new_size) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) (rcd->egrbufs.buffers[j].dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) rcd->egrbufs.buffers[j].len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) offset += new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) rcd->egrbufs.rcvtid_size = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) rcd->egrbufs.numbufs = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) rcd->egrbufs.size = alloced_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) hfi1_cdbg(PROC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) rcd->ctxt, rcd->egrbufs.alloced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * Set the context's rcv array head update threshold to the closest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * power of 2 (so we can use a mask instead of modulo) below half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * the allocated entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) */
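/* e.g. 1000 allocated entries -> rounddown_pow_of_two(500) == 256 */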
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) rcd->egrbufs.threshold =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * Compute the expected RcvArray entry base. This is done after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * allocating the eager buffers in order to maximize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * expected RcvArray entries for the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) rcd->expected_count = max_entries - egrtop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
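/*
 * Expected (TID) entries are presumably consumed in pairs by the
 * hardware, hence the cap at MAX_TID_PAIR_ENTRIES * 2.
 */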
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) rcd->expected_base = rcd->eager_base + egrtop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) rcd->eager_base, rcd->expected_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) hfi1_cdbg(PROC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) "ctxt%u: current Eager buffer size is invalid %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) rcd->ctxt, rcd->egrbufs.rcvtid_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) goto bail_rcvegrbuf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
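/*
 * Program every allocated eager buffer into the device's RcvArray,
 * yielding between entries since this can be a long loop.
 */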
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) rcd->egrbufs.rcvtids[idx].dma, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
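/* error unwind: free each coherent eager buffer allocated above */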
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) bail_rcvegrbuf_phys:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) for (idx = 0; idx < rcd->egrbufs.alloced &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) rcd->egrbufs.buffers[idx].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) dma_free_coherent(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) rcd->egrbufs.buffers[idx].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) rcd->egrbufs.buffers[idx].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) rcd->egrbufs.buffers[idx].dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) rcd->egrbufs.buffers[idx].addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) rcd->egrbufs.buffers[idx].dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) rcd->egrbufs.buffers[idx].len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }