^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright(c) 2015-2018 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is provided under a dual BSD/GPLv2 license. When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <rdma/opa_addr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include "hfi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include "mad.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include "qp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include "vnic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /* the reset value from the FM is supposed to be 0xffff, handle both */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define OPA_LINK_WIDTH_RESET_OLD 0x0fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define OPA_LINK_WIDTH_RESET 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
/*
 * A queued OPA trap notice awaiting transmission (and possible retry)
 * to the SM.  Nodes live on the per-port ibp->rvp.trap_lists queues,
 * protected by ibp->rvp.lock.
 */
struct trap_node {
	struct list_head list;			/* entry in a trap_list queue */
	struct opa_mad_notice_attr data;	/* notice payload sent to the SM */
	__be64 tid;				/* transaction ID; 0 until first send */
	int len;				/* bytes of @data copied into the MAD */
	u32 retry;				/* times this trap has been re-sent */
	u8 in_use;				/* send in flight; defer free/removal */
	u8 repress;				/* SM repressed it; free after send */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static int smp_length_check(u32 data_size, u32 request_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (unlikely(request_len < data_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static int reply(struct ib_mad_hdr *smp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * The verbs framework will handle the directed/LID route
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * packet changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) smp->method = IB_MGMT_METHOD_GET_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) smp->status |= IB_SMP_DIRECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
/* Zero the variable-length data area of an OPA SMP in place. */
static inline void clear_opa_smp_data(struct opa_smp *smp)
{
	memset(opa_get_smp_data(smp), 0, opa_get_smp_data_size(smp));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) return ppd->pkeys[pkey_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct ib_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) event.event = IB_EVENT_PKEY_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) event.device = &dd->verbs_dev.rdi.ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) event.element.port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
/*
 * If the port is down, clean up all pending traps. We need to be careful
 * with the given trap, because it may be queued.
 *
 * Takes ownership of @trap (may be NULL): it is always freed here,
 * whether or not it was on one of the lists.
 */
static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
{
	struct trap_node *node, *q;
	unsigned long flags;
	struct list_head trap_list;
	int i;

	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
		/*
		 * Detach the whole queue under the lock, then free the
		 * entries outside it to keep the critical section short.
		 */
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list);
		ibp->rvp.trap_lists[i].list_len = 0;
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);

		/*
		 * Remove all items from the list, freeing all the non-given
		 * traps.
		 */
		list_for_each_entry_safe(node, q, &trap_list, list) {
			list_del(&node->list);
			if (node != trap)
				kfree(node);
		}
	}

	/*
	 * If this wasn't on one of the lists it would not be freed. If it
	 * was on the list, it is now safe to free.
	 */
	kfree(trap);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
/*
 * Queue @trap for transmission, or bump its retry count if it is
 * already queued.  If no send is pending, arm the o14-2 trap timer
 * and hand back the head-of-queue trap to transmit now.
 *
 * Takes ownership of @trap: it is freed here when it cannot be queued
 * (invalid type or queue full).
 *
 * Return: the trap to send now, or NULL if nothing should be sent.
 */
static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
					    struct trap_node *trap)
{
	struct trap_node *node;
	struct trap_list *trap_list;
	unsigned long flags;
	unsigned long timeout;
	int found = 0;
	unsigned int queue_id;
	static int trap_count;

	/* The low nibble of generic_type selects the per-type queue. */
	queue_id = trap->data.generic_type & 0x0F;
	if (queue_id >= RVT_MAX_TRAP_LISTS) {
		trap_count++;
		pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n",
				   trap->data.generic_type, trap_count);
		kfree(trap);
		return NULL;
	}

	/*
	 * Since the retry (handle timeout) does not remove a trap request
	 * from the list, all we have to do is compare the node.
	 */
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	trap_list = &ibp->rvp.trap_lists[queue_id];

	list_for_each_entry(node, &trap_list->list, list) {
		if (node == trap) {
			node->retry++;
			found = 1;
			break;
		}
	}

	/* If it is not on the list, add it, limited to RVT_MAX_TRAP_LEN. */
	if (!found) {
		if (trap_list->list_len < RVT_MAX_TRAP_LEN) {
			trap_list->list_len++;
			list_add_tail(&trap->list, &trap_list->list);
		} else {
			pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n",
					    trap->data.generic_type);
			kfree(trap);
		}
	}

	/*
	 * Next check to see if there is a timer pending. If not, set it up
	 * and get the first trap from the list.
	 */
	node = NULL;
	if (!timer_pending(&ibp->rvp.trap_timer)) {
		/*
		 * o14-2
		 * If the time out is set we have to wait until it expires
		 * before the trap can be sent.
		 * This should be > RVT_TRAP_TIMEOUT
		 */
		timeout = (RVT_TRAP_TIMEOUT *
			   (1UL << ibp->rvp.subnet_timeout)) / 1000;
		mod_timer(&ibp->rvp.trap_timer,
			  jiffies + usecs_to_jiffies(timeout));
		node = list_first_entry(&trap_list->list, struct trap_node,
					list);
		/* Mark busy so a concurrent repress cannot free it. */
		node->in_use = 1;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return node;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
/*
 * Handle a TrapRepress SMP from the SM: the queued notice whose TID
 * matches has been acknowledged, so remove and free it — unless a
 * send is currently in flight, in which case flag it so send_trap()
 * frees it when the send path finishes.
 */
static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
					 struct opa_smp *smp)
{
	struct trap_list *trap_list;
	struct trap_node *trap;
	unsigned long flags;
	int i;

	if (smp->attr_id != IB_SMP_ATTR_NOTICE)
		return;

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
		/* Only the head of each queue can have been sent. */
		trap_list = &ibp->rvp.trap_lists[i];
		trap = list_first_entry_or_null(&trap_list->list,
						struct trap_node, list);
		if (trap && trap->tid == smp->tid) {
			if (trap->in_use) {
				/* send_trap() holds it; let it free it. */
				trap->repress = 1;
			} else {
				trap_list->list_len--;
				list_del(&trap->list);
				kfree(trap);
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct rdma_ah_attr *attr, u32 dlid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) rdma_ah_set_dlid(attr, dlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) rdma_ah_set_ah_flags(attr, IB_AH_GRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) grh->sgid_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) grh->hop_limit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) grh->dgid.global.subnet_prefix =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ibp->rvp.gid_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) grh->dgid.global.interface_id = OPA_MAKE_ID(dlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) struct rvt_ah *ah, u32 dlid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct rdma_ah_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct rvt_qp *qp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) memset(&attr, 0, sizeof(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) attr.type = ah->ibah.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) hfi1_update_sm_ah_attr(ibp, &attr, dlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) qp0 = rcu_dereference(ibp->rvp.qp[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (qp0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) ret = rdma_modify_ah(&ah->ibah, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) struct rdma_ah_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) struct ib_ah *ah = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct rvt_qp *qp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) struct hfi1_devdata *dd = dd_from_ppd(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) u8 port_num = ppd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) memset(&attr, 0, sizeof(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) hfi1_update_sm_ah_attr(ibp, &attr, dlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) qp0 = rcu_dereference(ibp->rvp.qp[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (qp0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
/*
 * Transmit @trap (ownership transferred) to the SM via qp0.
 *
 * The trap is first queued through check_and_add_trap(); it is only
 * posted when no o14-2 timeout is pending.  If the port is not ACTIVE
 * or there is no send agent, all pending traps are discarded instead.
 */
static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct opa_smp *smp;
	unsigned long flags;
	int pkey_idx;
	u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

	agent = ibp->rvp.send_agent;
	if (!agent) {
		cleanup_traps(ibp, trap);
		return;
	}

	/* o14-3.2.1 */
	if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) {
		cleanup_traps(ibp, trap);
		return;
	}

	/* Add the trap to the list if necessary and see if we can send it */
	trap = check_and_add_trap(ibp, trap);
	if (!trap)
		return;

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
			__func__, hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	/* Build the Trap() SMP headers. */
	smp = send_buf->mad;
	smp->base_version = OPA_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = OPA_SM_CLASS_VERSION;
	smp->method = IB_MGMT_METHOD_TRAP;

	/* Only update the transaction ID for new traps (o13-5). */
	if (trap->tid == 0) {
		ibp->rvp.tid++;
		/* make sure that tid != 0 */
		if (ibp->rvp.tid == 0)
			ibp->rvp.tid++;
		trap->tid = cpu_to_be64(ibp->rvp.tid);
	}
	smp->tid = trap->tid;

	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */

	memcpy(smp->route.lid.data, &trap->data, trap->len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			/* Build and cache an AH to the SM's LID. */
			ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah)) {
				/* NOTE(review): send_buf appears to leak on this path — verify */
				spin_unlock_irqrestore(&ibp->rvp.lock, flags);
				return;
			}
			send_buf->ah = ah;
			ibp->rvp.sm_ah = ibah_to_rvtah(ah);
		} else {
			/* No SM LID known yet; cannot address the trap. */
			spin_unlock_irqrestore(&ibp->rvp.lock, flags);
			return;
		}
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
	}

	/*
	 * If the trap was repressed while things were getting set up, don't
	 * bother sending it. This could happen for a retry.
	 */
	if (trap->repress) {
		list_del(&trap->list);
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		kfree(trap);
		ib_free_send_mad(send_buf);
		return;
	}

	trap->in_use = 0;
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (ib_post_send_mad(send_buf, NULL))
		ib_free_send_mad(send_buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) void hfi1_handle_trap_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) struct trap_node *trap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) /* Find the trap with the highest priority */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) spin_lock_irqsave(&ibp->rvp.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct trap_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) spin_unlock_irqrestore(&ibp->rvp.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if (trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) send_trap(ibp, trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) struct trap_node *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) trap = kzalloc(sizeof(*trap), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (!trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) INIT_LIST_HEAD(&trap->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) trap->data.generic_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) trap->data.prod_type_lsb = IB_NOTICE_PROD_CA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) trap->data.trap_num = trap_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) trap->data.issuer_lid = cpu_to_be32(lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) return trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * Send a bad P_Key trap (ch. 14.3.8).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) u32 qp1, u32 qp2, u32 lid1, u32 lid2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct trap_node *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) u32 lid = ppd_from_ibp(ibp)->lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) ibp->rvp.n_pkt_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) ibp->rvp.pkey_violations++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (!trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /* Send violation trap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) trap->data.ntc_257_258.key = cpu_to_be32(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) trap->data.ntc_257_258.sl = sl << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) trap->len = sizeof(trap->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) send_trap(ibp, trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * Send a bad M_Key trap (ch. 14.3.9).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) struct trap_node *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) u32 lid = ppd_from_ibp(ibp)->lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) if (!trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) /* Send violation trap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) trap->data.ntc_256.lid = trap->data.issuer_lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) trap->data.ntc_256.method = mad->method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) trap->data.ntc_256.attr_id = mad->attr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) trap->data.ntc_256.attr_mod = mad->attr_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) trap->data.ntc_256.mkey = mkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) trap->data.ntc_256.dr_slid = dr_slid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) trap->data.ntc_256.dr_trunc_hop |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) IB_NOTICE_TRAP_DR_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) trap->data.ntc_256.dr_trunc_hop |= hop_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) memcpy(trap->data.ntc_256.dr_rtn_path, return_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) hop_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) trap->len = sizeof(trap->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) send_trap(ibp, trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * Send a Port Capability Mask Changed trap (ch. 14.3.11).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) struct trap_node *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) u32 lid = ppd_from_ibp(ibp)->lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) trap = create_trap_node(IB_NOTICE_TYPE_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) OPA_TRAP_CHANGE_CAPABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (!trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) trap->data.ntc_144.lid = trap->data.issuer_lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) trap->len = sizeof(trap->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) send_trap(ibp, trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * Send a System Image GUID Changed trap (ch. 14.3.12).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) struct trap_node *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) u32 lid = ppd_from_ibp(ibp)->lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (!trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) trap->data.ntc_145.lid = trap->data.issuer_lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) trap->len = sizeof(trap->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) send_trap(ibp, trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) * Send a Node Description Changed trap (ch. 14.3.13).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) struct trap_node *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) u32 lid = ppd_from_ibp(ibp)->lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) trap = create_trap_node(IB_NOTICE_TYPE_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) OPA_TRAP_CHANGE_CAPABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (!trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) trap->data.ntc_144.lid = trap->data.issuer_lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) trap->data.ntc_144.change_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) trap->len = sizeof(trap->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) send_trap(ibp, trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) u8 *data, struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) u8 port, u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) struct opa_node_description *nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (am || smp_length_check(sizeof(*nd), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) nd = (struct opa_node_description *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) *resp_len += sizeof(*nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) struct opa_node_info *ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) ni = (struct opa_node_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) /* GUID 0 is illegal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) smp_length_check(sizeof(*ni), max_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) ni->base_version = OPA_MGMT_BASE_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) ni->class_version = OPA_SM_CLASS_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) ni->node_type = 1; /* channel adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) ni->num_ports = ibdev->phys_port_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /* This is already in network order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) ni->system_image_guid = ib_hfi1_sys_image_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) ni->node_guid = ibdev->node_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) ni->device_id = cpu_to_be16(dd->pcidev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) ni->revision = cpu_to_be32(dd->minrev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) ni->local_port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) ni->vendor_id[0] = dd->oui1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) ni->vendor_id[1] = dd->oui2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) ni->vendor_id[2] = dd->oui3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) *resp_len += sizeof(*ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) /* GUID 0 is illegal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (smp->attr_mod || pidx >= dd->num_pports ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) ibdev->node_guid == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) nip->base_version = OPA_MGMT_BASE_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) nip->class_version = OPA_SM_CLASS_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) nip->node_type = 1; /* channel adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) nip->num_ports = ibdev->phys_port_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /* This is already in network order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) nip->sys_guid = ib_hfi1_sys_image_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) nip->node_guid = ibdev->node_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) nip->device_id = cpu_to_be16(dd->pcidev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) nip->revision = cpu_to_be32(dd->minrev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) nip->local_port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) nip->vendor_id[0] = dd->oui1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) nip->vendor_id[1] = dd->oui2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) nip->vendor_id[2] = dd->oui3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) int mad_flags, __be64 mkey, __be32 dr_slid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) u8 return_path[], u8 hop_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) int valid_mkey = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) /* Is the mkey in the process of expiring? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (ibp->rvp.mkey_lease_timeout &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /* Clear timeout and mkey protection field. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) ibp->rvp.mkey_lease_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) ibp->rvp.mkeyprot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) ibp->rvp.mkey == mkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) valid_mkey = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) /* Unset lease timeout on any valid Get/Set/TrapRepress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) (mad->method == IB_MGMT_METHOD_GET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) mad->method == IB_MGMT_METHOD_SET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ibp->rvp.mkey_lease_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (!valid_mkey) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) switch (mad->method) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) case IB_MGMT_METHOD_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /* Bad mkey not a violation below level 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (ibp->rvp.mkeyprot < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) case IB_MGMT_METHOD_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) case IB_MGMT_METHOD_TRAP_REPRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (ibp->rvp.mkey_violations != 0xFFFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ++ibp->rvp.mkey_violations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (!ibp->rvp.mkey_lease_timeout &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ibp->rvp.mkey_lease_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ibp->rvp.mkey_lease_timeout = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) ibp->rvp.mkey_lease_period * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /* Generate a trap notice. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) bad_mkey(ibp, mad, mkey, dr_slid, return_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) hop_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * The SMA caches reads from LCB registers in case the LCB is unavailable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * (The LCB is unavailable in certain link states, for example.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct lcb_datum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) u32 off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static struct lcb_datum lcb_cache[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static int write_lcb_cache(u32 off, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (lcb_cache[i].off == off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) lcb_cache[i].val = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) pr_warn("%s bad offset 0x%x\n", __func__, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) static int read_lcb_cache(u32 off, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (lcb_cache[i].off == off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) *val = lcb_cache[i].val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) pr_warn("%s bad offset 0x%x\n", __func__, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) void read_ltp_rtt(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct opa_port_info *pi = (struct opa_port_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) u8 mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u8 credit_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u8 is_beaconing_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u32 num_ports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) u32 buffer_units;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) u64 tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* IB numbers ports from 1, hw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) pi->lid = cpu_to_be32(ppd->lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* Only return the mkey if the protection field allows it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (!(smp->method == IB_MGMT_METHOD_GET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ibp->rvp.mkey != smp->mkey &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ibp->rvp.mkeyprot == 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pi->mkey = ibp->rvp.mkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) pi->subnet_prefix = ibp->rvp.gid_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) pi->sa_qp = cpu_to_be32(ppd->sa_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pi->link_width.active = cpu_to_be16(ppd->link_width_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) pi->link_width_downgrade.supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) cpu_to_be16(ppd->link_width_downgrade_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pi->link_width_downgrade.enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cpu_to_be16(ppd->link_width_downgrade_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) pi->link_width_downgrade.tx_active =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) cpu_to_be16(ppd->link_width_downgrade_tx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) pi->link_width_downgrade.rx_active =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) cpu_to_be16(ppd->link_width_downgrade_rx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) state = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (start_of_sm_config && (state == IB_PORT_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ppd->is_sm_config_started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) pi->port_phys_conf = (ppd->port_type & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) pi->port_states.ledenable_offlinereason |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ppd->is_sm_config_started << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * This pairs with the memory barrier in hfi1_start_led_override to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * ensure that we read the correct state of LED beaconing represented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * by led_override_timer_active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) pi->port_states.ledenable_offlinereason |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) ppd->offline_disabled_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) pi->port_states.portphysstate_portstate =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) (driver_pstate(ppd) << 4) | state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) for (i = 0; i < ppd->vls_supported; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if ((i % 2) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* don't forget VL 15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) pi->partenforce_filterraw |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* P_KeyViolations are counted by hardware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) pi->vl.cap = ppd->vls_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) OPA_PORT_LINK_MODE_OPA << 5 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) OPA_PORT_LINK_MODE_OPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) pi->port_mode = cpu_to_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ppd->is_active_optimize_enabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) pi->port_packet_format.supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) OPA_PORT_PACKET_FORMAT_16B);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) pi->port_packet_format.enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) OPA_PORT_PACKET_FORMAT_16B);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* flit_control.interleave is (OPA V1, version .76):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * bits use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * ---- ---
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * 2 res
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * 2 DistanceSupported
 *     2  DistanceEnabled
 *     5  MaxNestLevelTxEnabled
 *     5  MaxNestLevelRxSupported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * HFI supports only "distance mode 1" (see OPA V1, version .76,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * section 9.6.2), so set DistanceSupported, DistanceEnabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * to 0x1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) pi->flit_control.interleave = cpu_to_be16(0x1400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) pi->link_down_reason = ppd->local_link_down_reason.sma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) pi->port_error_action = cpu_to_be32(ppd->port_error_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* 32.768 usec. response time (guessing) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) pi->resptimevalue = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) pi->local_port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* buffer info for FM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pi->neigh_port_num = ppd->neighbor_port_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) pi->port_neigh_mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) (ppd->neighbor_fm_security ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* HFIs shall always return VL15 credits to their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * neighbor in a timely manner, without any credit return pacing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) credit_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) buffer_units |= (credit_rate << 6) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) pi->buffer_units = cpu_to_be32(buffer_units);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) << 3 | (OPA_MCAST_NR & 0x7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /* HFI supports a replay buffer 128 LTPs in size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) pi->replay_depth.buffer = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * this counter is 16 bits wide, but the replay_depth.wire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * variable is only 8 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (tmp > 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) tmp = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) pi->replay_depth.wire = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) *resp_len += sizeof(struct opa_port_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * get_pkeys - return the PKEY table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * @port: the IB port number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * @pkeys: the pkey table is placed here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct hfi1_pportdata *ppd = dd->pport + port - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
/*
 * __subn_get_opa_pkeytable - handle an OPA SMA Get(PKeyTable) request.
 *
 * @smp: the incoming SM packet; status flags are set on it on error
 * @am: attribute modifier; encodes the block count and the start block
 * @data: response payload buffer the pkey blocks are written into
 * @ibdev: the IB device
 * @port: the (1-based) IB port number
 * @resp_len: if non-NULL, incremented by the payload size on success
 * @max_len: maximum allowed payload length, for bounds checking
 *
 * Only a request that starts at block 0 is served with real data; any
 * other start block is answered with IB_SMP_INVALID_FIELD.
 */
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_req = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;	/* low 11 bits of the AM */
	__be16 *p;
	u16 *q;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);
	size_t size;

	/* a request for zero blocks is malformed */
	if (n_blocks_req == 0) {
		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_req);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/*
	 * NOTE(review): the +1 rounds up, but overcounts by one block when
	 * npkeys is an exact multiple of the block size -- confirm intended.
	 */
	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

	/* reject a response that would exceed the allowed payload length */
	if (smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* range-check the requested window of blocks */
	if (start_block + n_blocks_req > n_blocks_avail ||
	    n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; "
			"avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_req, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* p and q alias the same payload buffer */
	p = (__be16 *)data;
	q = (u16 *)data;
	/* get the real pkeys if we are requesting the first block */
	if (start_block == 0) {
		get_pkeys(dd, port, q);
		/* convert the table to wire (big-endian) order in place */
		for (i = 0; i < npkeys; i++)
			p[i] = cpu_to_be16(q[i]);
		if (resp_len)
			*resp_len += size;
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	return reply((struct ib_mad_hdr *)smp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
/* verdicts produced by the *_transition_allowed() checks below */
enum {
	HFI_TRANSITION_DISALLOWED,	/* invalid request; flag IB_SMP_INVALID_FIELD */
	HFI_TRANSITION_IGNORED,		/* no-op request; silently dropped */
	HFI_TRANSITION_ALLOWED,		/* transition may be carried out */
	HFI_TRANSITION_UNDEFINED,	/* state pair outside the defined tables */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * Use shortened names to improve readability of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * {logical,physical}_state_transitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) */
enum {
	__D = HFI_TRANSITION_DISALLOWED,	/* disallowed */
	__I = HFI_TRANSITION_IGNORED,		/* ignored */
	__A = HFI_TRANSITION_ALLOWED,		/* allowed */
	__U = HFI_TRANSITION_UNDEFINED,		/* undefined */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * represented in physical_state_transitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * Within physical_state_transitions, rows represent "old" states,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * columns "new" states, and physical_state_transitions.allowed[old][new]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * indicates if the transition from old state to new state is legal (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * OPAg1v1, Table 6-4).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
static const struct {
	/* allowed[old][new]; index = physical state - IB_PORTPHYSSTATE_POLLING */
	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
	{
		/* 2    3    4    5    6    7    8    9   10   11 */
	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
	/* all-__U rows are states that presumably never occur as the "old"
	 * state of a request -- per OPAg1v1 Table 6-4; confirm against spec */
	}
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * in logical_state_transitions
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * Within logical_state_transitions rows represent "old" states,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * columns "new" states, and logical_state_transitions.allowed[old][new]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * indicates if the transition from old state to new state is legal (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * OPAg1v1, Table 9-12).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) */
static const struct {
	/* allowed[old][new]; index = logical state - IB_PORT_DOWN */
	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
	{
		/* 1    2    3    4    5 */
	/* 1 */	{ __I, __D, __D, __D, __U},
	/* 2 */	{ __D, __I, __A, __D, __U},
	/* 3 */	{ __D, __D, __I, __A, __U},
	/* 4 */	{ __D, __D, __I, __I, __U},
	/* 5 */	{ __U, __U, __U, __U, __U},
	}
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int logical_transition_allowed(int old, int new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) pr_warn("invalid logical state(s) (old %d new %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return HFI_TRANSITION_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (new == IB_PORT_NOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return HFI_TRANSITION_ALLOWED; /* always allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /* adjust states for indexing into logical_state_transitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) old -= IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) new -= IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (old < 0 || new < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return HFI_TRANSITION_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return logical_state_transitions.allowed[old][new];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static int physical_transition_allowed(int old, int new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) pr_warn("invalid physical state(s) (old %d new %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return HFI_TRANSITION_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (new == IB_PORTPHYSSTATE_NOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return HFI_TRANSITION_ALLOWED; /* always allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* adjust states for indexing into physical_state_transitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) old -= IB_PORTPHYSSTATE_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) new -= IB_PORTPHYSSTATE_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (old < 0 || new < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return HFI_TRANSITION_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return physical_state_transitions.allowed[old][new];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) u32 logical_new, u32 physical_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) u32 physical_old = driver_pstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) u32 logical_old = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) int ret, logical_allowed, physical_allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ret = logical_transition_allowed(logical_old, logical_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) logical_allowed = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (ret == HFI_TRANSITION_DISALLOWED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ret == HFI_TRANSITION_UNDEFINED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pr_warn("invalid logical state transition %s -> %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) opa_lstate_name(logical_old),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) opa_lstate_name(logical_new));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ret = physical_transition_allowed(physical_old, physical_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) physical_allowed = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (ret == HFI_TRANSITION_DISALLOWED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ret == HFI_TRANSITION_UNDEFINED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) pr_warn("invalid physical state transition %s -> %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) opa_pstate_name(physical_old),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) opa_pstate_name(physical_new));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (logical_allowed == HFI_TRANSITION_IGNORED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) physical_allowed == HFI_TRANSITION_IGNORED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return HFI_TRANSITION_IGNORED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * A change request of Physical Port State from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * 'Offline' to 'Polling' should be ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) (physical_new == IB_PORTPHYSSTATE_POLLING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return HFI_TRANSITION_IGNORED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * Either physical_allowed or logical_allowed is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * HFI_TRANSITION_ALLOWED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return HFI_TRANSITION_ALLOWED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) u32 logical_state, u32 phys_state, int local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) u32 link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ret = port_states_transition_allowed(ppd, logical_state, phys_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (ret == HFI_TRANSITION_DISALLOWED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ret == HFI_TRANSITION_UNDEFINED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* error message emitted above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (ret == HFI_TRANSITION_IGNORED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) !(logical_state == IB_PORT_DOWN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) logical_state == IB_PORT_NOP)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) logical_state, phys_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * Logical state changes are summarized in OPAv1g1 spec.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * Table 9-12; physical state changes are summarized in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * OPAv1g1 spec., Table 6.4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) switch (logical_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) case IB_PORT_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (phys_state == IB_PORTPHYSSTATE_NOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) case IB_PORT_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (phys_state == IB_PORTPHYSSTATE_NOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) link_state = HLS_DN_DOWNDEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) link_state = HLS_DN_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 0, OPA_LINKDOWN_REASON_FM_BOUNCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) link_state = HLS_DN_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) phys_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if ((link_state == HLS_DN_POLL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) link_state == HLS_DN_DOWNDEF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * Going to poll. No matter what the current state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * always move offline first, then tune and start the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * link. This correctly handles a FM link bounce and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * a link enable. Going offline is a no-op if already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) set_link_state(ppd, HLS_DN_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) set_link_state(ppd, link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (link_state == HLS_DN_DISABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) (ppd->offline_disabled_reason >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ppd->offline_disabled_reason ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * Don't send a reply if the response would be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * through the disabled port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (link_state == HLS_DN_DISABLE && !local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) case IB_PORT_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ret = set_link_state(ppd, HLS_UP_ARMED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) send_idle_sma(dd, SMA_IDLE_ARM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) case IB_PORT_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (ppd->neighbor_normal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ret = set_link_state(ppd, HLS_UP_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) send_idle_sma(dd, SMA_IDLE_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) logical_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * subn_set_opa_portinfo - set port information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * @smp: the incoming SM packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * @ibdev: the infiniband device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * @port: the port on the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) u32 *resp_len, u32 max_len, int local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct opa_port_info *pi = (struct opa_port_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct ib_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) u8 clientrereg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) u32 smlid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) u32 lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) u8 ls_old, ls_new, ps_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) u8 vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) u8 msl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u8 crc_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) u16 lse, lwe, mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) u32 num_ports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) int ret, i, invalid = 0, call_set_mtu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) int call_link_downgrade_policy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (num_ports != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) smp_length_check(sizeof(*pi), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) lid = be32_to_cpu(pi->lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (lid & 0xFF000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) goto get_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) smlid = be32_to_cpu(pi->sm_lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (smlid & 0xFF000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) goto get_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) clientrereg = (pi->clientrereg_subnettimeout &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) OPA_PI_MASK_CLIENT_REREGISTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* IB numbers ports from 1, hw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ibp = &ppd->ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) event.device = ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) event.element.port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ls_old = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ibp->rvp.mkey = pi->mkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ibp->rvp.gid_prefix = pi->subnet_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) event.event = IB_EVENT_GID_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* Must be a valid unicast LID address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if ((lid == 0 && ls_old > IB_PORT_INIT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) (hfi1_is_16B_mcast(lid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) } else if (ppd->lid != lid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (ppd->lid != lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) event.event = IB_EVENT_LID_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* Manufacture GID from LID to support extended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) be64_to_cpu(OPA_MAKE_ID(lid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) event.event = IB_EVENT_GID_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) msl = pi->smsl & OPA_PI_MASK_SMSL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ppd->linkinit_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) (pi->partenforce_filterraw &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) OPA_PI_MASK_LINKINIT_REASON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* Must be a valid unicast LID address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) (hfi1_is_16B_mcast(smlid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) spin_lock_irqsave(&ibp->rvp.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (ibp->rvp.sm_ah) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (smlid != ibp->rvp.sm_lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (msl != ibp->rvp.sm_sl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) spin_unlock_irqrestore(&ibp->rvp.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (smlid != ibp->rvp.sm_lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) ibp->rvp.sm_lid = smlid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (msl != ibp->rvp.sm_sl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) ibp->rvp.sm_sl = msl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) event.event = IB_EVENT_SM_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (pi->link_down_reason == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) ppd->local_link_down_reason.sma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ppd->local_link_down_reason.latest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (pi->neigh_link_down_reason == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ppd->neigh_link_down_reason.sma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ppd->neigh_link_down_reason.latest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) ppd->sa_qp = be32_to_cpu(pi->sa_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) ppd->port_error_action = be32_to_cpu(pi->port_error_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) lwe = be16_to_cpu(pi->link_width.enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (lwe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (lwe == OPA_LINK_WIDTH_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) lwe == OPA_LINK_WIDTH_RESET_OLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) set_link_width_enabled(ppd, ppd->link_width_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) else if ((lwe & ~ppd->link_width_supported) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) set_link_width_enabled(ppd, lwe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* LWD.E is always applied - 0 means "disabled" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (lwe == OPA_LINK_WIDTH_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) lwe == OPA_LINK_WIDTH_RESET_OLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) set_link_width_downgrade_enabled(ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) ppd->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) link_width_downgrade_supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* only set and apply if something changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (lwe != ppd->link_width_downgrade_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) set_link_width_downgrade_enabled(ppd, lwe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) call_link_downgrade_policy = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) lse = be16_to_cpu(pi->link_speed.enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (lse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (lse & be16_to_cpu(pi->link_speed.supported))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) set_link_speed_enabled(ppd, lse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) ibp->rvp.mkeyprot =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) ibp->rvp.vl_high_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) for (i = 0; i < ppd->vls_supported; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if ((i % 2) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 4) & 0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (mtu == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) mtu = hfi1_max_mtu; /* use a valid MTU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (dd->vld[i].mtu != mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) "MTU change on vl %d from %d to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) i, dd->vld[i].mtu, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) dd->vld[i].mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) call_set_mtu++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* As per OPAV1 spec: VL15 must support and be configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * for operation with a 2048 or larger MTU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (mtu < 2048 || mtu == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) mtu = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (dd->vld[15].mtu != mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) "MTU change on vl 15 from %d to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) dd->vld[15].mtu, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) dd->vld[15].mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) call_set_mtu++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (call_set_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) set_mtu(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* Set operational VLs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (vls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (vls > ppd->vls_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) pi->operational_vls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) vls) == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (pi->mkey_violations == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ibp->rvp.mkey_violations = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (pi->pkey_violations == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) ibp->rvp.pkey_violations = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (pi->qkey_violations == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ibp->rvp.qkey_violations = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) ibp->rvp.subnet_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) crc_enabled >>= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) crc_enabled &= 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (crc_enabled != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ppd->is_active_optimize_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) !!(be16_to_cpu(pi->port_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ls_new = pi->port_states.portphysstate_portstate &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) OPA_PI_MASK_PORT_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ps_new = (pi->port_states.portphysstate_portstate &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (ls_old == IB_PORT_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (start_of_sm_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) ppd->is_sm_config_started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) } else if (ls_new == IB_PORT_ARMED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (ppd->is_sm_config_started == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) invalid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (clientrereg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) event.event = IB_EVENT_CLIENT_REREGISTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * Do the port state change now that the other link parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * have been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * Changing the port physical state only makes sense if the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * is down or is being set to down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (!invalid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) /* restore re-reg bit per o14-12.2.1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) pi->clientrereg_subnettimeout |= clientrereg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * Apply the new link downgrade policy. This may result in a link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * bounce. Do this after everything else so things are settled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * Possible problem: if setting the port state above fails, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * the policy change is not applied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (call_link_downgrade_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) apply_link_downgrade_policy(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) get_only:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * set_pkeys - set the PKEY table for ctxt 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * @dd: the hfi1_ib device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * @port: the IB port number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * @pkeys: the PKEY table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) int changed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) int update_includes_mgmt_partition = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * IB port one/two always maps to context zero/one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * always a kernel context, no locking needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * If we get here with ppd setup, no need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * that rcd is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * If the update does not include the management pkey, don't do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (pkeys[i] == LIM_MGMT_P_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) update_includes_mgmt_partition = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (!update_includes_mgmt_partition)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) u16 key = pkeys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) u16 okey = ppd->pkeys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (key == okey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * The SM gives us the complete PKey table. We have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * to ensure that we put the PKeys in the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) * slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) ppd->pkeys[i] = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) hfi1_event_pkey_change(dd, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) u32 n_blocks_sent = OPA_AM_NBLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) u32 start_block = am & 0x7ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) u16 *p = (u16 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) __be16 *q = (__be16 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) u16 n_blocks_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) unsigned npkeys = hfi1_get_npkeys(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) u32 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (n_blocks_sent == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) port, start_block, n_blocks_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (start_block + n_blocks_sent > n_blocks_avail ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) start_block, n_blocks_sent, n_blocks_avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) OPA_NUM_PKEY_BLOCKS_PER_SMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) p[i] = be16_to_cpu(q[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) #define ILLEGAL_VL 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * for SC15, which must map to VL15). If we don't remap things this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * way it is possible for VL15 counters to increment when we try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * send on a SC which is mapped to an invalid VL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * When getting the table convert ILLEGAL_VL back to VL15.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static void filter_sc2vlt(void *data, bool set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) u8 *pd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) for (i = 0; i < OPA_MAX_SCS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (i == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if ((pd[i] & 0x1f) == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) pd[i] = ILLEGAL_VL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if ((pd[i] & 0x1f) == ILLEGAL_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) pd[i] = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) u64 *val = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) filter_sc2vlt(data, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) write_csr(dd, SEND_SC2VLT0, *val++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) write_csr(dd, SEND_SC2VLT1, *val++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) write_csr(dd, SEND_SC2VLT2, *val++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) write_csr(dd, SEND_SC2VLT3, *val++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) write_seqlock_irq(&dd->sc2vl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) write_sequnlock_irq(&dd->sc2vl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) u64 *val = (u64 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) *val++ = read_csr(dd, SEND_SC2VLT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) *val++ = read_csr(dd, SEND_SC2VLT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) *val++ = read_csr(dd, SEND_SC2VLT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) *val++ = read_csr(dd, SEND_SC2VLT3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) filter_sc2vlt((u64 *)data, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (am || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) *p++ = ibp->sl_to_sc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) size_t size = ARRAY_SIZE(ibp->sl_to_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) u8 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (am || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) sc = *p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (ibp->sl_to_sc[i] != sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) ibp->sl_to_sc[i] = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /* Put all stale qps into error state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) hfi1_error_port_qps(ibp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (am || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) *p++ = ibp->sc_to_sl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) size_t size = ARRAY_SIZE(ibp->sc_to_sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (am || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) ibp->sc_to_sl[i] = *p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) u32 n_blocks = OPA_AM_NBLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) void *vp = (void *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) size_t size = 4 * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (n_blocks != 1 || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) get_sc2vlt_tables(dd, vp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) u32 n_blocks = OPA_AM_NBLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) int async_update = OPA_AM_ASYNC(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) void *vp = (void *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) int lstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * set_sc2vlt_tables writes the information contained in *data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * to four 64-bit registers SendSC2VLt[0-3]. We need to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * sure *max_len is not greater than the total size of the four
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * SendSC2VLt[0-3] registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) size_t size = 4 * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /* IB numbers ports from 1, hw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) lstate = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * it's known that async_update is 0 by this point, but include
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * the explicit check for clarity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (!async_update &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) set_sc2vlt_tables(dd, vp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) u32 n_blocks = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) void *vp = (void *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) int size = sizeof(struct sc2vlnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (n_blocks != 1 || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) u32 n_blocks = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) void *vp = (void *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) int lstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) int size = sizeof(struct sc2vlnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (n_blocks != 1 || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /* IB numbers ports from 1, hw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) lstate = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) u32 nports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) u32 lstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) lstate = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (start_of_sm_config && (lstate == IB_PORT_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) ppd->is_sm_config_started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) psi->port_states.ledenable_offlinereason |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) ppd->is_sm_config_started << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) psi->port_states.ledenable_offlinereason |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) ppd->offline_disabled_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) psi->port_states.portphysstate_portstate =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) (driver_pstate(ppd) << 4) | (lstate & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) psi->link_width_downgrade_tx_active =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) cpu_to_be16(ppd->link_width_downgrade_tx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) psi->link_width_downgrade_rx_active =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) cpu_to_be16(ppd->link_width_downgrade_rx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) *resp_len += sizeof(struct opa_port_state_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) u32 *resp_len, u32 max_len, int local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) u32 nports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) u32 ls_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) u8 ls_new, ps_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) int ret, invalid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) ls_old = driver_lstate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) ls_new = port_states_to_logical_state(&psi->port_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ps_new = port_states_to_phys_state(&psi->port_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (ls_old == IB_PORT_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (start_of_sm_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) ppd->is_sm_config_started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) } else if (ls_new == IB_PORT_ARMED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (ppd->is_sm_config_started == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) invalid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (!invalid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) u32 addr = OPA_AM_CI_ADDR(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) u32 len = OPA_AM_CI_LEN(am) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (dd->pport->port_type != PORT_TYPE_QSFP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) smp_length_check(len, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * check that addr is within spec, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * addr and (addr + len - 1) are on the same "page"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (addr >= 4096 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) ret = get_cable_info(dd, port, addr, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (ret == -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) smp->status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) /* The address range for the CableInfo SMA query is wider than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * memory available on the QSFP cable. We want to return a valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * response, albeit zeroed out, for address ranges beyond available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * memory but that are within the CableInfo query spec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (ret < 0 && ret != -ERANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) *resp_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) struct ib_device *ibdev, u8 port, u32 *resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) u32 num_ports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) struct buffer_control *p = (struct buffer_control *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) int size = sizeof(struct buffer_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (num_ports != 1 || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) trace_bct_get(dd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct ib_device *ibdev, u8 port, u32 *resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) u32 num_ports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) struct buffer_control *p = (struct buffer_control *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) ppd = dd->pport + (port - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) trace_bct_set(dd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) u32 num_ports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) u8 section = (am & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) int size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (num_ports != 1 || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) switch (section) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) case OPA_VLARB_LOW_ELEMENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) case OPA_VLARB_HIGH_ELEMENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) case OPA_VLARB_PREEMPT_ELEMENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) case OPA_VLARB_PREEMPT_MATRIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) be32_to_cpu(smp->attr_mod));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (size > 0 && resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) u32 num_ports = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) u8 section = (am & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) int size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (num_ports != 1 || smp_length_check(size, max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) switch (section) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) case OPA_VLARB_LOW_ELEMENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) case OPA_VLARB_HIGH_ELEMENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) * can be changed from the default values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) case OPA_VLARB_PREEMPT_ELEMENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) case OPA_VLARB_PREEMPT_MATRIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) smp->status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) be32_to_cpu(smp->attr_mod));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) struct opa_pma_mad {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct ib_mad_hdr mad_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) u8 data[2024];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct opa_port_status_req {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) __u8 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) __u8 reserved[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) __be32 vl_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) #define VL_MASK_ALL 0x00000000000080ffUL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct opa_port_status_rsp {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) __u8 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) __u8 reserved[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) __be32 vl_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) /* Data counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) __be64 port_xmit_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) __be64 port_rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) __be64 port_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) __be64 port_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) __be64 port_multicast_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) __be64 port_multicast_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) __be64 port_xmit_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) __be64 sw_port_congestion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) __be64 port_rcv_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) __be64 port_rcv_becn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) __be64 port_xmit_time_cong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) __be64 port_xmit_wasted_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) __be64 port_xmit_wait_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) __be64 port_rcv_bubble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) __be64 port_mark_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /* Error counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) __be64 port_rcv_constraint_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) __be64 port_rcv_switch_relay_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) __be64 port_xmit_discards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) __be64 port_xmit_constraint_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) __be64 port_rcv_remote_physical_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) __be64 local_link_integrity_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) __be64 port_rcv_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) __be64 excessive_buffer_overruns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) __be64 fm_config_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) __be32 link_error_recovery;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) __be32 link_downed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) u8 uncorrectable_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) u8 link_quality_indicator; /* 5res, 3bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) u8 res2[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct _vls_pctrs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /* per-VL Data counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) __be64 port_vl_xmit_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) __be64 port_vl_rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) __be64 port_vl_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) __be64 port_vl_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) __be64 port_vl_xmit_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) __be64 sw_port_vl_congestion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) __be64 port_vl_rcv_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) __be64 port_vl_rcv_becn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) __be64 port_xmit_time_cong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) __be64 port_vl_xmit_wasted_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) __be64 port_vl_xmit_wait_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) __be64 port_vl_rcv_bubble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) __be64 port_vl_mark_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) __be64 port_vl_xmit_discards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) } vls[]; /* real array size defined by # bits set in vl_select_mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) enum counter_selects {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) CS_PORT_XMIT_DATA = (1 << 31),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) CS_PORT_RCV_DATA = (1 << 30),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) CS_PORT_XMIT_PKTS = (1 << 29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) CS_PORT_RCV_PKTS = (1 << 28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) CS_PORT_MCAST_RCV_PKTS = (1 << 26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) CS_PORT_XMIT_WAIT = (1 << 25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) CS_SW_PORT_CONGESTION = (1 << 24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) CS_PORT_RCV_FECN = (1 << 23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) CS_PORT_RCV_BECN = (1 << 22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) CS_PORT_XMIT_TIME_CONG = (1 << 21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) CS_PORT_XMIT_WASTED_BW = (1 << 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) CS_PORT_XMIT_WAIT_DATA = (1 << 19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) CS_PORT_RCV_BUBBLE = (1 << 18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) CS_PORT_MARK_FECN = (1 << 17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) CS_PORT_XMIT_DISCARDS = (1 << 14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) CS_PORT_RCV_ERRORS = (1 << 10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) CS_FM_CONFIG_ERRORS = (1 << 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) CS_LINK_ERROR_RECOVERY = (1 << 7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) CS_LINK_DOWNED = (1 << 6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) CS_UNCORRECTABLE_ERRORS = (1 << 5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) struct opa_clear_port_status {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) __be64 port_select_mask[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) __be32 counter_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct opa_aggregate {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) __be16 attr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) __be32 attr_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) u8 data[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) #define MSK_LLI 0x000000f0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) #define MSK_LLI_SFT 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) #define MSK_LER 0x0000000f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) #define MSK_LER_SFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) #define ADD_LLI 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) #define ADD_LER 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /* Request contains first three fields, response contains those plus the rest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) struct opa_port_data_counters_msg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) __be64 port_select_mask[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) __be32 vl_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) __be32 resolution;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) /* Response fields follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) struct _port_dctrs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) u8 port_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) u8 reserved2[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) __be32 link_quality_indicator; /* 29res, 3bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) /* Data counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) __be64 port_xmit_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) __be64 port_rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) __be64 port_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) __be64 port_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) __be64 port_multicast_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) __be64 port_multicast_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) __be64 port_xmit_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) __be64 sw_port_congestion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) __be64 port_rcv_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) __be64 port_rcv_becn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) __be64 port_xmit_time_cong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) __be64 port_xmit_wasted_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) __be64 port_xmit_wait_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) __be64 port_rcv_bubble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) __be64 port_mark_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) __be64 port_error_counter_summary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) /* Sum of error counts/port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) struct _vls_dctrs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) /* per-VL Data counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) __be64 port_vl_xmit_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) __be64 port_vl_rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) __be64 port_vl_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) __be64 port_vl_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) __be64 port_vl_xmit_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) __be64 sw_port_vl_congestion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) __be64 port_vl_rcv_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) __be64 port_vl_rcv_becn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) __be64 port_xmit_time_cong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) __be64 port_vl_xmit_wasted_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) __be64 port_vl_xmit_wait_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) __be64 port_vl_rcv_bubble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) __be64 port_vl_mark_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) } vls[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) /* array size defined by #bits set in vl_select_mask*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) } port[1]; /* array size defined by #ports in attribute modifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) struct opa_port_error_counters64_msg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) * Request contains first two fields, response contains the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) * whole magilla
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) __be64 port_select_mask[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) __be32 vl_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /* Response-only fields follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) __be32 reserved1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) struct _port_ectrs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) u8 port_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) u8 reserved2[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) __be64 port_rcv_constraint_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) __be64 port_rcv_switch_relay_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) __be64 port_xmit_discards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) __be64 port_xmit_constraint_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) __be64 port_rcv_remote_physical_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) __be64 local_link_integrity_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) __be64 port_rcv_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) __be64 excessive_buffer_overruns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) __be64 fm_config_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) __be32 link_error_recovery;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) __be32 link_downed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) u8 uncorrectable_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) u8 reserved3[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) struct _vls_ectrs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) __be64 port_vl_xmit_discards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) } vls[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) /* array size defined by #bits set in vl_select_mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) } port[1]; /* array size defined by #ports in attribute modifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) struct opa_port_error_info_msg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) __be64 port_select_mask[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) __be32 error_info_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) __be32 reserved1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) struct _port_ei {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) u8 port_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) u8 reserved2[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) /* PortRcvErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) u8 status_and_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) u8 raw[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* EI1to12 format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) u8 packet_flit1[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) u8 packet_flit2[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) u8 remaining_flit_bits12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) } ei1to12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) u8 packet_bytes[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) u8 remaining_flit_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) } ei13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) } ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) u8 reserved3[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) } __packed port_rcv_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) /* ExcessiveBufferOverrunInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) u8 status_and_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) u8 reserved4[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) } __packed excessive_buffer_overrun_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) /* PortXmitConstraintErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) u8 reserved5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) __be16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) __be32 slid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) } __packed port_xmit_constraint_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) /* PortRcvConstraintErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) u8 reserved6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) __be16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) __be32 slid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) } __packed port_rcv_constraint_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) /* PortRcvSwitchRelayErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) u8 status_and_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) u8 reserved7[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) __u32 error_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) } __packed port_rcv_switch_relay_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /* UncorrectableErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) u8 status_and_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) u8 reserved8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) } __packed uncorrectable_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) /* FMConfigErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) u8 status_and_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) u8 error_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) } __packed fm_config_ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) __u32 reserved9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) } port[1]; /* actual array size defined by #ports in attr modifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /* opa_port_error_info_msg error_info_select_mask bit definitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) enum error_info_selects {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) ES_PORT_RCV_ERROR_INFO = (1 << 31),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) ES_FM_CONFIG_ERROR_INFO = (1 << 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) struct ib_device *ibdev, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) struct opa_class_port_info *p =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) (struct opa_class_port_info *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) memset(pmp->data, 0, sizeof(pmp->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (pmp->mad_hdr.attr_mod != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) p->base_version = OPA_MGMT_BASE_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) p->class_version = OPA_SM_CLASS_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) p->cap_mask2_resp_time = cpu_to_be32(18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) *resp_len += sizeof(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) static void a0_portstatus(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct opa_port_status_rsp *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (!is_bx(ppd->dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) unsigned long vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) u64 sum_vl_xmit_wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) unsigned long vl_all_mask = VL_MASK_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) u64 tmp = sum_vl_xmit_wait +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) read_port_cntr(ppd, C_TX_WAIT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) idx_from_vl(vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) if (tmp < sum_vl_xmit_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) /* we wrapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) sum_vl_xmit_wait = (u64)~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) sum_vl_xmit_wait = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * tx_link_width - convert link width bitmask to integer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * value representing actual link width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * @link_width: width of active link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) * @return: return index of the bit set in link_width var
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * The function convert and return the index of bit set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * that indicate the current link width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) u16 tx_link_width(u16 link_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) int n = LINK_WIDTH_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) u16 tx_width = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) while (link_width && n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (link_width & (1 << (n - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) tx_width = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) n--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return tx_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) * get_xmit_wait_counters - Convert HFI 's SendWaitCnt/SendWaitVlCnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) * counter in unit of TXE cycle times to flit times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * @ppd: info of physical Hfi port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * @link_width: width of active link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * @link_speed: speed of active link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * @vl: represent VL0-VL7, VL15 for PortVLXmitWait counters request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * and if vl value is C_VL_COUNT, it represent SendWaitCnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) * counter request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * @return: return SendWaitCnt/SendWaitVlCnt counter value per vl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * Convert SendWaitCnt/SendWaitVlCnt counter from TXE cycle times to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * flit times. Call this function to samples these counters. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * function will calculate for previous state transition and update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * current state at end of function using ppd->prev_link_width and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * ppd->port_vl_xmit_wait_last to port_vl_xmit_wait_curr and link_width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) u16 link_width, u16 link_speed, int vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) u64 port_vl_xmit_wait_curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) u64 delta_vl_xmit_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) u64 xmit_wait_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) if (vl > C_VL_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (vl < C_VL_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) port_vl_xmit_wait_curr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) read_port_cntr(ppd, C_TX_WAIT_VL, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) port_vl_xmit_wait_curr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) xmit_wait_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) port_vl_xmit_wait_curr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) ppd->port_vl_xmit_wait_last[vl];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) delta_vl_xmit_wait =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) convert_xmit_counter(xmit_wait_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) ppd->prev_link_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) link_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) ppd->prev_link_width = link_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return ppd->vl_xmit_flit_cnt[vl];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) u8 port, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct opa_port_status_req *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) (struct opa_port_status_req *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) struct opa_port_status_rsp *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) unsigned long vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) size_t response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) u8 port_num = req->port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) u8 num_vls = hweight64(vl_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) struct _vls_pctrs *vlinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) int vfi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) u64 tmp, tmp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) u16 link_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) u16 link_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) response_data_size = struct_size(rsp, vls, num_vls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) if (response_data_size > sizeof(pmp->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) if (nports != 1 || (port_num && port_num != port) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) memset(pmp->data, 0, sizeof(pmp->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) rsp = (struct opa_port_status_rsp *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) rsp->port_num = port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) rsp->port_num = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) rsp->port_rcv_constraint_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) rsp->port_multicast_xmit_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) rsp->port_multicast_rcv_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) * Convert PortXmitWait counter from TXE cycle times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * to flit times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) link_width =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) tx_link_width(ppd->link_width_downgrade_tx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) link_speed = get_link_speed(ppd->link_speed_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) rsp->port_xmit_wait =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) link_speed, C_VL_COUNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) rsp->port_rcv_fecn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) rsp->port_rcv_becn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) rsp->port_xmit_discards =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) rsp->port_xmit_constraint_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) rsp->port_rcv_remote_physical_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) rsp->local_link_integrity_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) /* overflow/wrapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) rsp->link_error_recovery = cpu_to_be32(~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) rsp->link_error_recovery = cpu_to_be32(tmp2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) rsp->port_rcv_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) rsp->excessive_buffer_overruns =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) rsp->fm_config_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) vlinfo = &rsp->vls[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) vfi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) /* The vl_select_mask has been checked above, and we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) * that it contains only entries which represent valid VLs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * So in the for_each_set_bit() loop below, we don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * any additional checks for vl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) memset(vlinfo, 0, sizeof(*vlinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) rsp->vls[vfi].port_vl_rcv_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) rsp->vls[vfi].port_vl_xmit_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) rsp->vls[vfi].port_vl_xmit_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * Convert PortVlXmitWait counter from TXE cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * times to flit times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) rsp->vls[vfi].port_vl_xmit_wait =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) link_speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) rsp->vls[vfi].port_vl_rcv_fecn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) rsp->vls[vfi].port_vl_rcv_becn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) rsp->vls[vfi].port_vl_xmit_discards =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) vlinfo++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) vfi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) a0_portstatus(ppd, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) *resp_len += response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) u8 res_lli, u8 res_ler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) u64 error_counter_summary = 0, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) /* port_rcv_switch_relay_errors is 0 for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) /* local link integrity must be right-shifted by the lli resolution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) CNTR_INVALID_VL) >> res_lli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) /* link error recovery must b right-shifted by the ler resolution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) error_counter_summary += (tmp >> res_ler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) /* ppd->link_downed is a 32-bit value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) /* this is an 8-bit quantity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return error_counter_summary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (!is_bx(ppd->dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) unsigned long vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) u64 sum_vl_xmit_wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) unsigned long vl_all_mask = VL_MASK_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) u64 tmp = sum_vl_xmit_wait +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) read_port_cntr(ppd, C_TX_WAIT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) idx_from_vl(vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (tmp < sum_vl_xmit_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) /* we wrapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) sum_vl_xmit_wait = (u64)~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) sum_vl_xmit_wait = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) struct _port_dctrs *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) rsp->port_multicast_xmit_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) rsp->port_multicast_rcv_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) u8 port, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct opa_port_data_counters_msg *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) (struct opa_port_data_counters_msg *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) struct _port_dctrs *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) struct _vls_dctrs *vlinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) size_t response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) u32 num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) u8 lq, num_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) u8 res_lli, res_ler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) u64 port_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) u8 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) unsigned long vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) unsigned long vl_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) int vfi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) u16 link_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) u16 link_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) vl_select_mask = be32_to_cpu(req->vl_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) res_lli = res_lli ? res_lli + ADD_LLI : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) res_ler = res_ler ? res_ler + ADD_LER : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) response_data_size = struct_size(req, port[0].vls, num_vls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (response_data_size > sizeof(pmp->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) * The bit set in the mask needs to be consistent with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) * port the request came in on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) port_mask = be64_to_cpu(req->port_select_mask[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) port_num = find_first_bit((unsigned long *)&port_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) sizeof(port_mask) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) if (port_num != port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) rsp = &req->port[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) memset(rsp, 0, sizeof(*rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) rsp->port_number = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) * Note that link_quality_indicator is a 32 bit quantity in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) * 'datacounters' queries (as opposed to 'portinfo' queries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) * where it's a byte).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) hfi1_read_link_quality(dd, &lq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) rsp->link_quality_indicator = cpu_to_be32((u32)lq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) pma_get_opa_port_dctrs(ibdev, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) * Convert PortXmitWait counter from TXE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) * cycle times to flit times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) link_width =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) tx_link_width(ppd->link_width_downgrade_tx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) link_speed = get_link_speed(ppd->link_speed_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) rsp->port_xmit_wait =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) link_speed, C_VL_COUNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) rsp->port_rcv_fecn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) rsp->port_rcv_becn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) rsp->port_error_counter_summary =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) cpu_to_be64(get_error_counter_summary(ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) res_lli, res_ler));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) vlinfo = &rsp->vls[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) vfi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) /* The vl_select_mask has been checked above, and we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) * that it contains only entries which represent valid VLs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) * So in the for_each_set_bit() loop below, we don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) * any additional checks for vl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) memset(vlinfo, 0, sizeof(*vlinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) rsp->vls[vfi].port_vl_xmit_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) rsp->vls[vfi].port_vl_rcv_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) rsp->vls[vfi].port_vl_xmit_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) rsp->vls[vfi].port_vl_rcv_pkts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) * Convert PortVlXmitWait counter from TXE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) * cycle times to flit times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) rsp->vls[vfi].port_vl_xmit_wait =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) link_speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) rsp->vls[vfi].port_vl_rcv_fecn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) rsp->vls[vfi].port_vl_rcv_becn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) /* rsp->port_vl_xmit_wasted_bw ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) * does this differ from rsp->vls[vfi].port_vl_xmit_wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) /*rsp->vls[vfi].port_vl_mark_fecn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) * + offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) vlinfo++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) vfi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) a0_datacounters(ppd, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) *resp_len += response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct ib_device *ibdev, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) struct _port_dctrs rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) memset(&rsp, 0, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) pma_get_opa_port_dctrs(ibdev, &rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) p->port_xmit_data = rsp.port_xmit_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) p->port_rcv_data = rsp.port_rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) p->port_xmit_packets = rsp.port_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) p->port_rcv_packets = rsp.port_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) p->port_unicast_xmit_packets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) p->port_unicast_rcv_packets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) struct _port_ectrs *rsp, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) u64 tmp, tmp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) /* overflow/wrapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) rsp->link_error_recovery = cpu_to_be32(~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) rsp->link_error_recovery = cpu_to_be32(tmp2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) rsp->port_rcv_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) rsp->port_rcv_remote_physical_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) rsp->port_rcv_switch_relay_errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) rsp->port_xmit_discards =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) rsp->port_xmit_constraint_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) rsp->port_rcv_constraint_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) rsp->local_link_integrity_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) rsp->excessive_buffer_overruns =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) u8 port, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) size_t response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) struct _port_ectrs *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) u8 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) struct opa_port_error_counters64_msg *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) u32 num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) u8 num_pslm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) u8 num_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) struct _vls_ectrs *vlinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) unsigned long vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) u64 port_mask, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) unsigned long vl_select_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) int vfi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) req = (struct opa_port_error_counters64_msg *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) if (num_ports != 1 || num_ports != num_pslm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) response_data_size = struct_size(req, port[0].vls, num_vls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) if (response_data_size > sizeof(pmp->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) * The bit set in the mask needs to be consistent with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) * port the request came in on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) port_mask = be64_to_cpu(req->port_select_mask[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) port_num = find_first_bit((unsigned long *)&port_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) sizeof(port_mask) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (port_num != port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) rsp = &req->port[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) ibp = to_iport(ibdev, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) memset(rsp, 0, sizeof(*rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) rsp->port_number = port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) pma_get_opa_port_ectrs(ibdev, rsp, port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) rsp->port_rcv_remote_physical_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) rsp->fm_config_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) rsp->port_rcv_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) vlinfo = &rsp->vls[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) vfi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) vl_select_mask = be32_to_cpu(req->vl_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) memset(vlinfo, 0, sizeof(*vlinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) rsp->vls[vfi].port_vl_xmit_discards =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) idx_from_vl(vl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) vlinfo += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) vfi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) *resp_len += response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) struct ib_device *ibdev, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) struct _port_ectrs rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) u64 temp_link_overrun_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) u64 temp_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) u32 temp_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) memset(&rsp, 0, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) pma_get_opa_port_ectrs(ibdev, &rsp, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) p->symbol_error_counter = 0; /* N/A for OPA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) temp_32 = be32_to_cpu(rsp.link_error_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) if (temp_32 > 0xFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) p->link_error_recovery_counter = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) p->link_error_recovery_counter = (u8)temp_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) temp_32 = be32_to_cpu(rsp.link_downed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) if (temp_32 > 0xFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) p->link_downed_counter = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) p->link_downed_counter = (u8)temp_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) temp_64 = be64_to_cpu(rsp.port_rcv_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) if (temp_64 > 0xFFFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) p->port_rcv_errors = cpu_to_be16(0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) p->port_rcv_errors = cpu_to_be16((u16)temp_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) if (temp_64 > 0xFFFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) temp_64 = be64_to_cpu(rsp.port_xmit_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (temp_64 > 0xFFFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) p->port_xmit_discards = cpu_to_be16(0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) p->port_xmit_discards = cpu_to_be16((u16)temp_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (temp_64 > 0xFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) p->port_xmit_constraint_errors = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) p->port_xmit_constraint_errors = (u8)temp_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) if (temp_64 > 0xFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) p->port_rcv_constraint_errors = 0xFFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) p->port_rcv_constraint_errors = (u8)temp_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) /* LocalLink: 7:4, BufferOverrun: 3:0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) if (temp_64 > 0xFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) temp_64 = 0xFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) temp_link_overrun_errors = temp_64 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (temp_64 > 0xFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) temp_64 = 0xFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) temp_link_overrun_errors |= temp_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) p->link_overrun_errors = (u8)temp_link_overrun_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) p->vl15_dropped = 0; /* N/A for OPA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) u8 port, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) size_t response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) struct _port_ei *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) struct opa_port_error_info_msg *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) u64 port_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) u32 num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) u8 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) u8 num_pslm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) req = (struct opa_port_error_info_msg *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) rsp = &req->port[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) memset(rsp, 0, sizeof(*rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (num_ports != 1 || num_ports != num_pslm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) response_data_size = sizeof(struct opa_port_error_info_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) if (response_data_size > sizeof(pmp->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) * The bit set in the mask needs to be consistent with the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * the request came in on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) port_mask = be64_to_cpu(req->port_select_mask[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) port_num = find_first_bit((unsigned long *)&port_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) sizeof(port_mask) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) if (port_num != port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) rsp->port_number = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) /* PortRcvErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) rsp->port_rcv_ei.status_and_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) dd->err_info_rcvport.status_and_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) &dd->err_info_rcvport.packet_flit1, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) &dd->err_info_rcvport.packet_flit2, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) /* ExcessiverBufferOverrunInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) reg = read_csr(dd, RCV_ERR_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) * if the RcvExcessBufferOverrun bit is set, save SC of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) * first pkt that encountered an excess buffer overrun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) u8 tmp = (u8)reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) tmp <<= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) /* set the status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) rsp->port_xmit_constraint_ei.status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) dd->err_info_xmit_constraint.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) rsp->port_xmit_constraint_ei.pkey =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) cpu_to_be16(dd->err_info_xmit_constraint.pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) rsp->port_xmit_constraint_ei.slid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) cpu_to_be32(dd->err_info_xmit_constraint.slid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) rsp->port_rcv_constraint_ei.status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) dd->err_info_rcv_constraint.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) rsp->port_rcv_constraint_ei.pkey =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) cpu_to_be16(dd->err_info_rcv_constraint.pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) rsp->port_rcv_constraint_ei.slid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) cpu_to_be32(dd->err_info_rcv_constraint.slid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) /* UncorrectableErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) /* FMConfigErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) *resp_len += response_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) u8 port, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) struct opa_clear_port_status *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) (struct opa_clear_port_status *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) u64 portn = be64_to_cpu(req->port_select_mask[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) u32 counter_select = be32_to_cpu(req->counter_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) unsigned long vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if ((nports != 1) || (portn != 1 << port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) * only counters returned by pma_get_opa_portstatus() are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) * handled, so when pma_get_opa_portstatus() gets a fix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) * the corresponding change should be made here as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (counter_select & CS_PORT_XMIT_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) if (counter_select & CS_PORT_RCV_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) if (counter_select & CS_PORT_XMIT_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) if (counter_select & CS_PORT_RCV_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if (counter_select & CS_PORT_MCAST_RCV_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (counter_select & CS_PORT_XMIT_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) /* ignore cs_sw_portCongestion for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) if (counter_select & CS_PORT_RCV_FECN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (counter_select & CS_PORT_RCV_BECN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) /* ignore cs_port_xmit_time_cong for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) /* ignore cs_port_xmit_wasted_bw for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) /* ignore cs_port_xmit_wait_data for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) if (counter_select & CS_PORT_RCV_BUBBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) /* Only applicable for switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) /* if (counter_select & CS_PORT_MARK_FECN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) /* ignore cs_port_rcv_switch_relay_errors for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) if (counter_select & CS_PORT_XMIT_DISCARDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (counter_select & CS_LINK_ERROR_RECOVERY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) if (counter_select & CS_PORT_RCV_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) dd->rcv_ovfl_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) if (counter_select & CS_FM_CONFIG_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) if (counter_select & CS_LINK_DOWNED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (counter_select & CS_UNCORRECTABLE_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (counter_select & CS_PORT_XMIT_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (counter_select & CS_PORT_RCV_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (counter_select & CS_PORT_XMIT_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (counter_select & CS_PORT_RCV_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) if (counter_select & CS_PORT_XMIT_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) /* sw_port_vl_congestion is 0 for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) if (counter_select & CS_PORT_RCV_FECN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) if (counter_select & CS_PORT_RCV_BECN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) /* port_vl_xmit_time_cong is 0 for HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) /* port_vl_xmit_wasted_bw ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) if (counter_select & CS_PORT_RCV_BUBBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) /* if (counter_select & CS_PORT_MARK_FECN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) if (counter_select & C_SW_XMIT_DSCD_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) idx_from_vl(vl), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) *resp_len += sizeof(*req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) u8 port, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) struct _port_ei *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) struct opa_port_error_info_msg *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) u64 port_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) u32 num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) u8 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) u8 num_pslm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) u32 error_info_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) req = (struct opa_port_error_info_msg *)pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) rsp = &req->port[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) memset(rsp, 0, sizeof(*rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) if (num_ports != 1 || num_ports != num_pslm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) * The bit set in the mask needs to be consistent with the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) * the request came in on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) port_mask = be64_to_cpu(req->port_select_mask[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) port_num = find_first_bit((unsigned long *)&port_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) sizeof(port_mask) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) if (port_num != port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) error_info_select = be32_to_cpu(req->error_info_select_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) /* PortRcvErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) if (error_info_select & ES_PORT_RCV_ERROR_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) /* turn off status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) /* ExcessiverBufferOverrunInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) * status bit is essentially kept in the h/w - bit 5 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) * RCV_ERR_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) write_csr(dd, RCV_ERR_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) /* UncorrectableErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) /* turn off status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) /* FMConfigErrorInfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) /* turn off status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) *resp_len += sizeof(*req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) struct opa_congestion_info_attr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) __be16 congestion_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) u8 congestion_log_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) struct opa_congestion_info_attr *p =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) (struct opa_congestion_info_attr *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) if (smp_length_check(sizeof(*p), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) p->congestion_info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) p->control_table_cap = ppd->cc_max_table_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) p->congestion_log_length = OPA_CONG_LOG_ELEMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) *resp_len += sizeof(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) u8 *data, struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) u8 port, u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) struct opa_congestion_setting_attr *p =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) (struct opa_congestion_setting_attr *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) struct opa_congestion_setting_entry_shadow *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) if (smp_length_check(sizeof(*p), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) cc_state = get_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) if (!cc_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) entries = cc_state->cong_setting.entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) for (i = 0; i < OPA_MAX_SLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) p->entries[i].ccti_increase = entries[i].ccti_increase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) p->entries[i].trigger_threshold =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) entries[i].trigger_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) p->entries[i].ccti_min = entries[i].ccti_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) *resp_len += sizeof(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) * Apply congestion control information stored in the ppd to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) * active structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) static void apply_cc_state(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) struct cc_state *old_cc_state, *new_cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) if (!new_cc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) * Hold the lock for updating *and* to prevent ppd information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) * from changing during the update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) spin_lock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) old_cc_state = get_cc_state_protected(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) if (!old_cc_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) /* never active, or shutting down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) spin_unlock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) kfree(new_cc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) *new_cc_state = *old_cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) if (ppd->total_cct_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) new_cc_state->cct.ccti_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) rcu_assign_pointer(ppd->cc_state, new_cc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) spin_unlock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) kfree_rcu(old_cc_state, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) struct opa_congestion_setting_attr *p =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) (struct opa_congestion_setting_attr *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) struct opa_congestion_setting_entry_shadow *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) if (smp_length_check(sizeof(*p), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) * Save details from packet into the ppd. Hold the cc_state_lock so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) * our information is consistent with anyone trying to apply the state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) spin_lock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) entries = ppd->congestion_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) for (i = 0; i < OPA_MAX_SLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) entries[i].ccti_increase = p->entries[i].ccti_increase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) entries[i].trigger_threshold =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) p->entries[i].trigger_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) entries[i].ccti_min = p->entries[i].ccti_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) spin_unlock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) /* now apply the information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) apply_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) u8 *data, struct ib_device *ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) u8 port, u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) u64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) if (am || smp_length_check(sizeof(*cong_log), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) spin_lock_irq(&ppd->cc_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) cong_log->congestion_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) cong_log->threshold_event_counter =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) cpu_to_be16(ppd->threshold_event_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) memcpy(cong_log->threshold_cong_event_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) ppd->threshold_cong_event_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) sizeof(cong_log->threshold_cong_event_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) /* keep timestamp in units of 1.024 usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) ts = ktime_get_ns() / 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) cong_log->current_time_stamp = cpu_to_be32(ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) struct opa_hfi1_cong_log_event_internal *cce =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) &ppd->cc_events[ppd->cc_mad_idx++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) ppd->cc_mad_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) * Entries which are older than twice the time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * required to wrap the counter are supposed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) if ((ts - cce->timestamp) / 2 > U32_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) memcpy(cong_log->events[i].remote_qp_number_cn_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) &cce->rqpn, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) cong_log->events[i].sl_svc_type_cn_entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) cong_log->events[i].remote_lid_cn_entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) cpu_to_be32(cce->rlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) cong_log->events[i].timestamp_cn_entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) cpu_to_be32(cce->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) * Reset threshold_cong_event_map, and threshold_event_counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) * to 0 when log is read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) memset(ppd->threshold_cong_event_map, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) sizeof(ppd->threshold_cong_event_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) ppd->threshold_event_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) spin_unlock_irq(&ppd->cc_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) *resp_len += sizeof(struct opa_hfi1_cong_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) struct ib_cc_table_attr *cc_table_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) (struct ib_cc_table_attr *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) u32 start_block = OPA_AM_START_BLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) u32 n_blocks = OPA_AM_NBLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) struct ib_cc_table_entry_shadow *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) u32 sentry, eentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) /* sanity check n_blocks, start_block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) if (n_blocks == 0 || smp_length_check(size, max_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) start_block + n_blocks > ppd->cc_max_table_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) cc_state = get_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) if (!cc_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) sentry = start_block * IB_CCT_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) entries = cc_state->cct.entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) /* return n_blocks, though the last block may not be full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) for (j = 0, i = sentry; i < eentry; j++, i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) cc_table_attr->ccti_entries[j].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) cpu_to_be16(entries[i].entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) *resp_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) u32 start_block = OPA_AM_START_BLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) u32 n_blocks = OPA_AM_NBLK(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) struct ib_cc_table_entry_shadow *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) u32 sentry, eentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) u16 ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) /* sanity check n_blocks, start_block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) if (n_blocks == 0 || smp_length_check(size, max_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) start_block + n_blocks > ppd->cc_max_table_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) sentry = start_block * IB_CCT_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) /* sanity check ccti_limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) ccti_limit = be16_to_cpu(p->ccti_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) if (ccti_limit + 1 > eentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) * Save details from packet into the ppd. Hold the cc_state_lock so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) * our information is consistent with anyone trying to apply the state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) spin_lock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) ppd->total_cct_entry = ccti_limit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) entries = ppd->ccti_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) for (j = 0, i = sentry; i < eentry; j++, i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) spin_unlock(&ppd->cc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) /* now apply the information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) apply_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) struct opa_led_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) __be32 rsvd_led_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) __be32 rsvd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) #define OPA_LED_SHIFT 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) #define OPA_LED_MASK BIT(OPA_LED_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) struct opa_led_info *p = (struct opa_led_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) u32 nport = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) u32 is_beaconing_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) * This pairs with the memory barrier in hfi1_start_led_override to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) * ensure that we read the correct state of LED beaconing represented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) * by led_override_timer_active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if (resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) *resp_len += sizeof(struct opa_led_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) struct opa_led_info *p = (struct opa_led_info *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) u32 nport = OPA_AM_NPORT(am);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) hfi1_start_led_override(dd->pport, 2000, 1500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) shutdown_led_override(dd->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) u8 *data, struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) u32 *resp_len, u32 max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) switch (attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) case IB_SMP_ATTR_NODE_DESC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) case IB_SMP_ATTR_NODE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) case IB_SMP_ATTR_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) case IB_SMP_ATTR_PKEY_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) case OPA_ATTRIB_ID_SL_TO_SC_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) case OPA_ATTRIB_ID_SC_TO_SL_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) case OPA_ATTRIB_ID_PORT_STATE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) case OPA_ATTRIB_ID_CABLE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) case IB_SMP_ATTR_VL_ARB_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) case OPA_ATTRIB_ID_CONGESTION_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) port, resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) port, resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) case IB_SMP_ATTR_LED_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) case IB_SMP_ATTR_SM_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) if (ibp->rvp.port_cap_flags & IB_PORT_SM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) smp->status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) ret = reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) u8 *data, struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) u32 *resp_len, u32 max_len, int local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) switch (attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) case IB_SMP_ATTR_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) resp_len, max_len, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) case IB_SMP_ATTR_PKEY_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) case OPA_ATTRIB_ID_SL_TO_SC_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) case OPA_ATTRIB_ID_SC_TO_SL_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) case OPA_ATTRIB_ID_PORT_STATE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) resp_len, max_len, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) case IB_SMP_ATTR_VL_ARB_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) port, resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) case IB_SMP_ATTR_LED_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) resp_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) case IB_SMP_ATTR_SM_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) if (ibp->rvp.port_cap_flags & IB_PORT_SM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) return IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) smp->status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) ret = reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) static inline void set_aggr_error(struct opa_aggregate *ag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) ag->err_reqlength |= cpu_to_be16(0x8000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) static int subn_get_opa_aggregate(struct opa_smp *smp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) u8 *next_smp = opa_get_smp_data(smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) if (num_attr < 1 || num_attr > 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) for (i = 0; i < num_attr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) struct opa_aggregate *agg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) size_t agg_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) size_t agg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) u32 am;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) agg = (struct opa_aggregate *)next_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) agg_size = sizeof(*agg) + agg_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) am = be32_to_cpu(agg->attr_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) *resp_len += agg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) /* zero the payload for this segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) memset(next_smp + sizeof(*agg), 0, agg_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) ibdev, port, NULL, (u32)agg_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) if (smp->status & IB_SMP_INVALID_FIELD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) if (smp->status & ~IB_SMP_DIRECTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) set_aggr_error(agg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) next_smp += agg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) static int subn_set_opa_aggregate(struct opa_smp *smp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) u32 *resp_len, int local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) u8 *next_smp = opa_get_smp_data(smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) if (num_attr < 1 || num_attr > 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) for (i = 0; i < num_attr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) struct opa_aggregate *agg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) size_t agg_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) size_t agg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) u32 am;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) agg = (struct opa_aggregate *)next_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) agg_size = sizeof(*agg) + agg_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) am = be32_to_cpu(agg->attr_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) *resp_len += agg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) smp->status |= IB_SMP_INVALID_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) ibdev, port, NULL, (u32)agg_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) if (smp->status & IB_SMP_INVALID_FIELD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) if (smp->status & ~IB_SMP_DIRECTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) set_aggr_error(agg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) next_smp += agg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) return reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) * OPAv1 specifies that, on the transition to link up, these counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) * are cleared:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) * PortRcvErrors [*]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) * LinkErrorRecovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) * LocalLinkIntegrityErrors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) * ExcessiveBufferOverruns [*]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) * [*] Error info associated with these counters is retained, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) * error info status is reset to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) void clear_linkup_counters(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) /* PortRcvErrors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) /* LinkErrorRecovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) /* LocalLinkIntegrityErrors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) /* ExcessiveBufferOverruns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) dd->rcv_ovfl_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) * is_local_mad() returns 1 if 'mad' is sent from, and destined to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) * local node, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) const struct ib_wc *in_wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) const struct opa_smp *smp = (const struct opa_smp *)mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) return (smp->hop_cnt == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) return (in_wc->slid == ppd->lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) * opa_local_smp_check() should only be called on MADs for which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) * is_local_mad() returns true. It applies the SMP checks that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) * specific to SMPs which are sent from, and destined to this node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) * SMPs which arrive from other nodes are instead checked by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) * opa_smp_check().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) static int opa_local_smp_check(struct hfi1_ibport *ibp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) const struct ib_wc *in_wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) u16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) pkey = ppd->pkeys[in_wc->pkey_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) * We need to do the "node-local" checks specified in OPAv1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) * rev 0.90, section 9.10.26, which are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) * - pkey is 0x7fff, or 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) * - Source QPN == 0 || Destination QPN == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) * - the MAD header's management class is either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) * IB_MGMT_CLASS_SUBN_LID_ROUTED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) * - SLID != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) * However, we know (and so don't need to check again) that,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) * for local SMPs, the MAD stack passes MADs with:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) * - Source QPN of 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) * our own port's lid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) * hfi1_pkey_validation_pma - It validates PKEYs for incoming PMA MAD packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) * @ibp: IB port data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) * @in_mad: MAD packet with header and data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) * @in_wc: Work completion data such as source LID, port number, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) * These are all the possible logic rules for validating a pkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) * and NOT self-originated packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) * Drop MAD packet as it should always be part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) * management partition unless it's a self-originated packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) * The packet is coming from a management node and the receiving node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) * is also a management node, so it is safe for the packet to go through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) * It could be an FM misconfiguration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) * It is safe for the packet to go through since a non-management node is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) * talking to another non-management node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) * Drop the packet because a non-management node is talking to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) * management node, and it could be an attack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) * For the implementation, these rules can be simplied to only checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) * for (a) and (e). There's no need to check for rule (b) as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) * the packet doesn't need to be dropped. Rule (c) is not possible in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) * the driver as LIM_MGMT_P_KEY is always in the pkey table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) * 0 - pkey is okay, -EINVAL it's a bad pkey
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) const struct opa_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) const struct ib_wc *in_wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) /* Rule (a) from above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (!is_local_mad(ibp, in_mad, in_wc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) pkey_value != LIM_MGMT_P_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) pkey_value != FULL_MGMT_P_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) /* Rule (e) from above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) if (pkey_value == LIM_MGMT_P_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) is_full_mgmt_pkey_in_table(ibp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) u8 port, const struct opa_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) struct opa_mad *out_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) u32 *resp_len, int local_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) struct opa_smp *smp = (struct opa_smp *)out_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) u32 am, data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) __be16 attr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) *out_mad = *in_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) data = opa_get_smp_data(smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) data_size = (u32)opa_get_smp_data_size(smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) am = be32_to_cpu(smp->attr_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) attr_id = smp->attr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) if (smp->class_version != OPA_SM_CLASS_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) smp->status |= IB_SMP_UNSUP_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) ret = reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) smp->route.dr.dr_slid, smp->route.dr.return_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) smp->hop_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) u32 port_num = be32_to_cpu(smp->attr_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) * If this is a get/set portinfo, we already check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) * M_Key if the MAD is for another port and the M_Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) * is OK on the receiving port. This check is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) * to increment the error counters when the M_Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) * fails to match on *both* ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) if (attr_id == IB_SMP_ATTR_PORT_INFO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) (smp->method == IB_MGMT_METHOD_GET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) smp->method == IB_MGMT_METHOD_SET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) port_num && port_num <= ibdev->phys_port_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) port != port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) (void)check_mkey(to_iport(ibdev, port_num),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) (struct ib_mad_hdr *)smp, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) smp->mkey, smp->route.dr.dr_slid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) smp->route.dr.return_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) smp->hop_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) ret = IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) *resp_len = opa_get_smp_header_size(smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) switch (smp->method) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) case IB_MGMT_METHOD_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) switch (attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) clear_opa_smp_data(smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) ret = subn_get_opa_sma(attr_id, smp, am, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) case OPA_ATTRIB_ID_AGGREGATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) ret = subn_get_opa_aggregate(smp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) case IB_MGMT_METHOD_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) switch (attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) ret = subn_set_opa_sma(attr_id, smp, am, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) ibdev, port, resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) data_size, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) case OPA_ATTRIB_ID_AGGREGATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) ret = subn_set_opa_aggregate(smp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) resp_len, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) case IB_MGMT_METHOD_TRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) case IB_MGMT_METHOD_REPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) case IB_MGMT_METHOD_REPORT_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) case IB_MGMT_METHOD_GET_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) * The ib_mad module will call us to process responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) * before checking for other consumers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) * Just tell the caller to process it normally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) ret = IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) case IB_MGMT_METHOD_TRAP_REPRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) subn_handle_opa_trap_repress(ibp, smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) /* Always successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) ret = IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) smp->status |= IB_SMP_UNSUP_METHOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) ret = reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) static int process_subn(struct ib_device *ibdev, int mad_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) u8 port, const struct ib_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) struct ib_mad *out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) struct ib_smp *smp = (struct ib_smp *)out_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) *out_mad = *in_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) if (smp->class_version != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) smp->status |= IB_SMP_UNSUP_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) ret = reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) smp->mkey, (__force __be32)smp->dr_slid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) smp->return_path, smp->hop_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) u32 port_num = be32_to_cpu(smp->attr_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) * If this is a get/set portinfo, we already check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) * M_Key if the MAD is for another port and the M_Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) * is OK on the receiving port. This check is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) * to increment the error counters when the M_Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) * fails to match on *both* ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) (smp->method == IB_MGMT_METHOD_GET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) smp->method == IB_MGMT_METHOD_SET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) port_num && port_num <= ibdev->phys_port_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) port != port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) (void)check_mkey(to_iport(ibdev, port_num),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) (struct ib_mad_hdr *)smp, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) smp->mkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) (__force __be32)smp->dr_slid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) smp->return_path, smp->hop_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) ret = IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) switch (smp->method) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) case IB_MGMT_METHOD_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) switch (smp->attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) case IB_SMP_ATTR_NODE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) ret = subn_get_nodeinfo(smp, ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) smp->status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) ret = reply((struct ib_mad_hdr *)smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) static int process_perf(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) const struct ib_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) struct ib_mad *out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) struct ib_class_port_info *cpi = (struct ib_class_port_info *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) &pmp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) int ret = IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) *out_mad = *in_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) if (pmp->mad_hdr.class_version != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) switch (pmp->mad_hdr.method) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) case IB_MGMT_METHOD_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) switch (pmp->mad_hdr.attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) case IB_PMA_PORT_COUNTERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) ret = pma_get_ib_portcounters(pmp, ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) case IB_PMA_PORT_COUNTERS_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) case IB_PMA_CLASS_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) case IB_MGMT_METHOD_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) if (pmp->mad_hdr.attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) case IB_MGMT_METHOD_TRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) case IB_MGMT_METHOD_GET_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) * The ib_mad module will call us to process responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) * before checking for other consumers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) * Just tell the caller to process it normally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) ret = IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) static int process_perf_opa(struct ib_device *ibdev, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) const struct opa_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) struct opa_mad *out_mad, u32 *resp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) *out_mad = *in_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) return reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) *resp_len = sizeof(pmp->mad_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) switch (pmp->mad_hdr.method) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) case IB_MGMT_METHOD_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) switch (pmp->mad_hdr.attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) case IB_PMA_CLASS_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) case OPA_PM_ATTRIB_ID_PORT_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) ret = pma_get_opa_portstatus(pmp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) ret = pma_get_opa_datacounters(pmp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) ret = pma_get_opa_porterrors(pmp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) case OPA_PM_ATTRIB_ID_ERROR_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) ret = pma_get_opa_errorinfo(pmp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) case IB_MGMT_METHOD_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) switch (pmp->mad_hdr.attr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) ret = pma_set_opa_portstatus(pmp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) case OPA_PM_ATTRIB_ID_ERROR_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) ret = pma_set_opa_errorinfo(pmp, ibdev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) case IB_MGMT_METHOD_TRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) case IB_MGMT_METHOD_GET_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) * The ib_mad module will call us to process responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) * before checking for other consumers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) * Just tell the caller to process it normally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) ret = IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) ret = reply((struct ib_mad_hdr *)pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) u8 port, const struct ib_wc *in_wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) const struct opa_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) struct opa_mad *out_mad, size_t *out_mad_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) u16 *out_mad_pkey_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) int pkey_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) int local_mad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) struct hfi1_ibport *ibp = to_iport(ibdev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) if (pkey_idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) hfi1_get_pkey(ibp, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) pkey_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) *out_mad_pkey_index = (u16)pkey_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) switch (in_mad->mad_hdr.mgmt_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) case IB_MGMT_CLASS_SUBN_LID_ROUTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) local_mad = is_local_mad(ibp, in_mad, in_wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) if (local_mad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) ret = opa_local_smp_check(ibp, in_wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) return IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) out_mad, &resp_len, local_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) case IB_MGMT_CLASS_PERF_MGMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) return IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) ret = IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) if (ret & IB_MAD_RESULT_REPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) *out_mad_size = round_up(resp_len, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) else if (ret & IB_MAD_RESULT_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) const struct ib_wc *in_wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) const struct ib_mad *in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) struct ib_mad *out_mad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) switch (in_mad->mad_hdr.mgmt_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) case IB_MGMT_CLASS_SUBN_LID_ROUTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) case IB_MGMT_CLASS_PERF_MGMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) ret = process_perf(ibdev, port, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) ret = IB_MAD_RESULT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) * hfi1_process_mad - process an incoming MAD packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) * @ibdev: the infiniband device this packet came in on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) * @mad_flags: MAD flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) * @port: the port number this packet came in on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) * @in_wc: the work completion entry for this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) * @in_grh: the global route header for this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) * @in_mad: the incoming MAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) * @out_mad: any outgoing MAD reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) * interested in processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) * Note that the verbs framework has already done the MAD sanity checks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) * MADs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) * This is called by the ib_mad module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) const struct ib_wc *in_wc, const struct ib_grh *in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) const struct ib_mad *in_mad, struct ib_mad *out_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) size_t *out_mad_size, u16 *out_mad_pkey_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) switch (in_mad->mad_hdr.base_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) case OPA_MGMT_BASE_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) return hfi1_process_opa_mad(ibdev, mad_flags, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) in_wc, in_grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) (struct opa_mad *)in_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) (struct opa_mad *)out_mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) out_mad_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) out_mad_pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) case IB_MGMT_BASE_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) in_grh, in_mad, out_mad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) return IB_MAD_RESULT_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) }