#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"

/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
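
/*
 * Illustrative sketch (not part of the driver API): an AHG update word
 * packs a 16-bit replacement value plus the header dword index, field
 * length, field start bit, and an update-enable bit, using the masks
 * and shifts above. A hypothetical helper building one such word could
 * look like this:
 *
 *      static inline u32 build_ahg_update(u32 index, u32 field_len,
 *                                         u32 field_start, u32 value)
 *      {
 *              return ((value & SDMA_AHG_VALUE_MASK) <<
 *                      SDMA_AHG_VALUE_SHIFT) |
 *                     ((index & SDMA_AHG_INDEX_MASK) <<
 *                      SDMA_AHG_INDEX_SHIFT) |
 *                     ((field_len & SDMA_AHG_FIELD_LEN_MASK) <<
 *                      SDMA_AHG_FIELD_LEN_SHIFT) |
 *                     ((field_start & SDMA_AHG_FIELD_START_MASK) <<
 *                      SDMA_AHG_FIELD_START_SHIFT) |
 *                     ((1u & SDMA_AHG_UPDATE_ENABLE_MASK) <<
 *                      SDMA_AHG_UPDATE_ENABLE_SHIFT);
 *      }
 */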

/* AHG modes */

/*
 * Be aware the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG       BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
        ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
        (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
        ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
        (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
        ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
        (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
        ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
        (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
        ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
        (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
        ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
        (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
        ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
        (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    BIT_ULL(0)
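
/*
 * Worked example (illustrative only): for a first-and-last descriptor
 * covering 64 bytes at bus address 0x1000, qw[0] would be
 *
 *      SDMA_DESC0_FIRST_DESC_FLAG | SDMA_DESC0_LAST_DESC_FLAG |
 *      ((64 & SDMA_DESC0_BYTE_COUNT_MASK) << SDMA_DESC0_BYTE_COUNT_SHIFT) |
 *      ((0x1000 & SDMA_DESC0_PHY_ADDR_MASK) << SDMA_DESC0_PHY_ADDR_SHIFT)
 *
 * i.e. 0xc040000000001000. make_tx_sdma_desc() below performs this
 * packing for the address and byte count.
 */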

enum sdma_states {
        sdma_state_s00_hw_down,
        sdma_state_s10_hw_start_up_halt_wait,
        sdma_state_s15_hw_start_up_clean_wait,
        sdma_state_s20_idle,
        sdma_state_s30_sw_clean_up_wait,
        sdma_state_s40_hw_clean_up_wait,
        sdma_state_s50_hw_halt_wait,
        sdma_state_s60_idle_halt_wait,
        sdma_state_s80_hw_freeze,
        sdma_state_s82_freeze_sw_clean,
        sdma_state_s99_running,
};

enum sdma_events {
        sdma_event_e00_go_hw_down,
        sdma_event_e10_go_hw_start,
        sdma_event_e15_hw_halt_done,
        sdma_event_e25_hw_clean_up_done,
        sdma_event_e30_go_running,
        sdma_event_e40_sw_cleaned,
        sdma_event_e50_hw_cleaned,
        sdma_event_e60_hw_halted,
        sdma_event_e70_go_idle,
        sdma_event_e80_hw_freeze,
        sdma_event_e81_hw_frozen,
        sdma_event_e82_hw_unfreeze,
        sdma_event_e85_link_down,
        sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
        unsigned op_enable:1;
        unsigned op_intenable:1;
        unsigned op_halt:1;
        unsigned op_cleanup:1;
        unsigned go_s99_running_tofalse:1;
        unsigned go_s99_running_totrue:1;
};

struct sdma_state {
        struct kref kref;
        struct completion comp;
        enum sdma_states current_state;
        unsigned current_op;
        unsigned go_s99_running;
        /* debugging/development */
        enum sdma_states previous_state;
        unsigned previous_op;
        enum sdma_events last_event;
};

/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and tear down routines to build up
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */

/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ. The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq. Slabs, pre-allocated lists,
 * and dma pools can be used. Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The enclosing structure must be declared with the
 * struct sdma_txreq as its first member.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location. It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls. The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx. Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx. An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent(). For these memory locations, it
 * is the responsibility of the user to handle that unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added. An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
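
/*
 * A minimal sketch of the flow described above (illustrative only: the
 * enclosing my_txreq structure, buffers, engine selection, and the
 * error/unwind handling are hypothetical, and sdma_send_txreq() is
 * declared elsewhere in the driver):
 *
 *      struct my_txreq {
 *              struct sdma_txreq txreq;   // must be the first member
 *              ...
 *      };
 *
 *      ret = sdma_txinit(&tx->txreq, 0, tlen, my_complete_cb);
 *      if (!ret)
 *              ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdrbytes);
 *      if (!ret)
 *              ret = sdma_txadd_page(dd, &tx->txreq, page, offset, len);
 *      if (!ret)
 *              ret = sdma_send_txreq(sde, wait, &tx->txreq, ...);
 */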

/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init(). Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
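
/*
 * Illustrative bring-up/tear-down order implied by the DOC above
 * (error handling omitted; this is a sketch, not a verbatim copy of
 * the driver's init path):
 *
 *      ret = sdma_init(dd, port);      // allocate rings, program CSRs
 *      ...                             // enable interrupts
 *      sdma_start(dd);                 // kick engines; the state
 *                                      // machine now advances via
 *                                      // interrupts
 *      ...
 *      sdma_exit(dd);                  // on unload
 *      sdma_clean(dd, num_engines);    // free per-engine resources
 */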

/*
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
        /* private: don't use directly */
        __le64 qw[2];
};

/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
        /* read mostly */
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        /* private: */
        void __iomem *tail_csr;
        u64 imask;                      /* clear interrupt mask */
        u64 idle_mask;
        u64 progress_mask;
        u64 int_mask;
        /* private: */
        volatile __le64 *head_dma;      /* DMA'ed by chip */
        /* private: */
        dma_addr_t head_phys;
        /* private: */
        struct hw_sdma_desc *descq;
        /* private: */
        unsigned descq_full_count;
        struct sdma_txreq **tx_ring;
        /* private: */
        dma_addr_t descq_phys;
        /* private: */
        u32 sdma_mask;
        /* private: */
        struct sdma_state state;
        /* private: */
        int cpu;
        /* private: */
        u8 sdma_shift;
        /* private: */
        u8 this_idx;                    /* zero relative engine */
        /* protect changes to senddmactrl shadow */
        spinlock_t senddmactrl_lock;
        /* private: */
        u64 p_senddmactrl;              /* shadow per-engine SendDmaCtrl */

        /* read/write using tail_lock */
        spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        /* private: */
        u64 tail_sn;
#endif
        /* private: */
        u32 descq_tail;
        /* private: */
        unsigned long ahg_bits;
        /* private: */
        u16 desc_avail;
        /* private: */
        u16 tx_tail;
        /* private: */
        u16 descq_cnt;

        /* read/write using head_lock */
        /* private: */
        seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        /* private: */
        u64 head_sn;
#endif
        /* private: */
        u32 descq_head;
        /* private: */
        u16 tx_head;
        /* private: */
        u64 last_status;
        /* private: */
        u64 err_cnt;
        /* private: */
        u64 sdma_int_cnt;
        u64 idle_int_cnt;
        u64 progress_int_cnt;

        /* private: */
        seqlock_t waitlock;
        struct list_head dmawait;

        /* CONFIG SDMA for now, just blindly duplicate */
        /* private: */
        struct tasklet_struct sdma_hw_clean_up_task
                ____cacheline_aligned_in_smp;

        /* private: */
        struct tasklet_struct sdma_sw_clean_up_task
                ____cacheline_aligned_in_smp;
        /* private: */
        struct work_struct err_halt_worker;
        /* private: */
        struct timer_list err_progress_check_timer;
        u32 progress_check_head;
        /* private: */
        struct work_struct flush_worker;
        /* protect flush list */
        spinlock_t flushlist_lock;
        /* private: */
        struct list_head flushlist;
        struct cpumask cpu_mask;
        struct kobject kobj;
        u32 msix_intr;
};

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
        return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
        return sde->descq_cnt -
                (sde->descq_tail -
                 READ_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
        return sde->descq_cnt - sdma_descq_freecnt(sde);
}
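
/*
 * Worked example for the free-count math above (assuming, as the
 * arithmetic implies, that descq_tail/descq_head advance as free-
 * running counters and are masked to the ring only when used as
 * indices): with descq_cnt = 2048, descq_tail = 2100 and
 * descq_head = 2090, there are 2100 - 2090 = 10 descriptors in
 * flight, so freecnt = 2048 - 10 - 1 = 2037. The unsigned subtraction
 * stays correct across u32 wraparound, and the trailing "- 1"
 * reserves one slot so a full ring remains distinguishable from an
 * empty one (see sdma_empty() above).
 */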

/*
 * Either head_lock or tail_lock required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
        return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 *
 */
static inline int sdma_running(struct sdma_engine *engine)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->tail_lock, flags);
        ret = __sdma_running(engine);
        spin_unlock_irqrestore(&engine->tail_lock, flags);
        return ret;
}

void _sdma_txreq_ahgadd(
        struct sdma_txreq *tx,
        u8 num_ahg,
        u8 ahg_entry,
        u32 *ahg,
        u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors for the first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 * @cb: callback
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user-independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency-sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted. The callback will be provided this tx, a status, and
 * a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait has been used, indicates that the iowait
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise. The sdma_txadd_*
 * entrances will pad with a descriptor that references 1 - 3 bytes once
 * the number of bytes specified in tlen has been supplied to the
 * sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header. This is for cases where the stored header is
 * larger than the header to be used in a packet. This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 *
 */
static inline int sdma_txinit_ahg(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        u8 ahg_entry,
        u8 num_ahg,
        u32 *ahg,
        u8 ahg_hlen,
        void (*cb)(struct sdma_txreq *, int))
{
        if (tlen == 0)
                return -ENODATA;
        if (tlen > MAX_SDMA_PKT_SIZE)
                return -EMSGSIZE;
        tx->desc_limit = ARRAY_SIZE(tx->descs);
        tx->descp = &tx->descs[0];
        INIT_LIST_HEAD(&tx->list);
        tx->num_desc = 0;
        tx->flags = flags;
        tx->complete = cb;
        tx->coalesce_buf = NULL;
        tx->wait = NULL;
        tx->packet_len = tlen;
        tx->tlen = tx->packet_len;
        tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
        tx->descs[0].qw[1] = 0;
        if (flags & SDMA_TXREQ_F_AHG_COPY)
                tx->descs[0].qw[1] |=
                        (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
                                << SDMA_DESC1_HEADER_INDEX_SHIFT) |
                        (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
                                << SDMA_DESC1_HEADER_MODE_SHIFT);
        else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
                _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
        return 0;
}

/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user-
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency-sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status. The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 */
static inline int sdma_txinit(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        void (*cb)(struct sdma_txreq *, int))
{
        return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
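
/*
 * Illustrative use of sdma_txinit() with a completion callback (the
 * enclosing my_txreq structure and the callback body are hypothetical):
 *
 *      static void my_tx_complete(struct sdma_txreq *txreq, int status)
 *      {
 *              struct my_txreq *tx =
 *                      container_of(txreq, struct my_txreq, txreq);
 *
 *              if (status != SDMA_TXREQ_S_OK)
 *                      ...     // drop/requeue; may run in IRQ context,
 *                              // so no sleeping here
 *              ...             // free tx back to its slab/pool
 *      }
 *
 *      ret = sdma_txinit(&tx->txreq, SDMA_TXREQ_F_URGENT, tlen,
 *                        my_tx_complete);
 */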
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /* helpers - don't use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) static inline int sdma_mapping_type(struct sdma_desc *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) >> SDMA_DESC1_GENERATION_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static inline size_t sdma_mapping_len(struct sdma_desc *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) >> SDMA_DESC0_BYTE_COUNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) >> SDMA_DESC0_PHY_ADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) static inline void make_tx_sdma_desc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct sdma_desc *desc = &tx->descp[tx->num_desc];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (!tx->num_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /* qw[0] zero; qw[1] first, ahg mode already in from init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) << SDMA_DESC1_GENERATION_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) desc->qw[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) << SDMA_DESC1_GENERATION_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) << SDMA_DESC0_PHY_ADDR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) << SDMA_DESC0_BYTE_COUNT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /* helper to extend txreq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int type, void *kvaddr, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) unsigned long offset, u16 len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (tx->num_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) /* helpers used by public routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static inline void _sdma_close_tx(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) tx->descp[tx->num_desc].qw[0] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) SDMA_DESC0_LAST_DESC_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) tx->descp[tx->num_desc].qw[1] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) dd->default_desc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (tx->flags & SDMA_TXREQ_F_URGENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) tx->descp[tx->num_desc].qw[1] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) (SDMA_DESC1_HEAD_TO_HOST_FLAG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) SDMA_DESC1_INT_REQ_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static inline int _sdma_txadd_daddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) make_tx_sdma_desc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) WARN_ON(len > tx->tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) tx->tlen -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* special cases for last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (!tx->tlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (tx->packet_len & (sizeof(u32) - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) rval = _pad_sdma_tx_descs(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) _sdma_close_tx(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) tx->num_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * sdma_txadd_page() - add a page to the sdma_txreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * @dd: the device to use for mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * @tx: tx request to which the page is added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * @page: page to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * @offset: offset within the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * @len: length in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * This is used to add a page/offset/length descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * The mapping/unmapping of the page/offset/len is automatically handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * extend/coalesce descriptor array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static inline int sdma_txadd_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if ((unlikely(tx->num_desc == tx->desc_limit))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) NULL, page, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (rval <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) addr = dma_map_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) &dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return _sdma_txadd_daddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) dd, SDMA_MAP_PAGE, tx, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * sdma_txadd_daddr() - add a dma address to the sdma_txreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the address is added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * @addr: dma address mapped by caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * @len: length in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * This is used to add a descriptor for memory that is already dma mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * In this case, there is no unmapping as part of the progress processing for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * this memory location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * 0 - success, -ENOMEM - couldn't extend descriptor array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static inline int sdma_txadd_daddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
	if (unlikely(tx->num_desc == tx->desc_limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) NULL, NULL, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (rval <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
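
/*
 * Usage sketch: add a slice of memory the caller has already mapped,
 * e.g. a dma_alloc_coherent() buffer. Because SDMA_MAP_NONE is used,
 * completion does not unmap anything; the caller keeps ownership of
 * the mapping and releases it only after the completion callback has
 * run. Hypothetical example_* name; error handling trimmed.
 */
static inline int example_txadd_coherent(struct hfi1_devdata *dd,
					 struct sdma_txreq *tx,
					 dma_addr_t bus_addr, u16 len)
{
	/* bus_addr/len must stay valid until the completion callback */
	return sdma_txadd_daddr(dd, tx, bus_addr, len);
}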
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * @kvaddr: the kernel virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * @len: length in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * This is used to add a descriptor referenced by the indicated kvaddr and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * The mapping/unmapping of the kvaddr and len is automatically handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * descriptor array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static inline int sdma_txadd_kvaddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) void *kvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
	if (unlikely(tx->num_desc == tx->desc_limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) kvaddr, NULL, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (rval <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) addr = dma_map_single(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) &dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) kvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return _sdma_txadd_daddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dd, SDMA_MAP_SINGLE, tx, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
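
/*
 * Usage sketch: a kmalloc()'d header is a typical kvaddr descriptor.
 * The buffer is dma_map_single()'d here and unmapped on completion,
 * so it must not be freed until the callback runs. Hypothetical
 * example_* name.
 */
static inline int example_txadd_header(struct hfi1_devdata *dd,
				       struct sdma_txreq *tx,
				       void *hdr, u16 hdrlen)
{
	/* hdr must be DMA-able memory (kmalloc, not stack or vmalloc) */
	return sdma_txadd_kvaddr(dd, tx, hdr, hdrlen);
}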
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct iowait_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int sdma_send_txreq(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct iowait_work *wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bool pkts_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int sdma_send_txlist(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct iowait_work *wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct list_head *tx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) u16 *count_out);
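
/*
 * Submission sketch: once the descriptors are in place, hand the
 * request to an engine. The engine and wait arguments come from the
 * caller's selection and queuing logic (see sdma_select_engine_vl()
 * below); the example_* name is hypothetical.
 */
static inline int example_send_one(struct sdma_engine *sde,
				   struct iowait_work *wait,
				   struct sdma_txreq *tx)
{
	/*
	 * Returns 0 on success or a negative errno; with a non-NULL
	 * wait the request can instead be queued for a wakeup when the
	 * ring drains. The pkts_sent flag feeds the iowait fairness
	 * accounting when the request must be queued.
	 */
	return sdma_send_txreq(sde, wait, tx, false);
}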
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int sdma_ahg_alloc(struct sdma_engine *sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: 16 bit value to write into the header field
 * @dwindex: dword index of the header field to update
 * @startbit: first bit of the field within the dword
 * @bits: width of the field in bits
 *
 * Build and return a 32 bit AHG update descriptor.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static inline u32 sdma_build_ahg_descriptor(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) u16 data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u8 dwindex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u8 startbit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u8 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ((startbit & SDMA_AHG_FIELD_START_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) SDMA_AHG_FIELD_START_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) SDMA_AHG_FIELD_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ((dwindex & SDMA_AHG_INDEX_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) SDMA_AHG_INDEX_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ((data & SDMA_AHG_VALUE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) SDMA_AHG_VALUE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
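
/*
 * Illustrative AHG flow: reserve an AHG slot on an engine, build an
 * update that writes the 16 bit value 0x1234 into header dword 4
 * starting at bit 0, and release the slot when the flow ends. The
 * example_* name is hypothetical; how idx and the descriptor are
 * attached to a request is left to the caller's request-build code.
 */
static inline int example_ahg_flow(struct sdma_engine *sde, u32 *descp)
{
	int idx = sdma_ahg_alloc(sde);

	if (idx < 0)
		return idx;
	/* value 0x1234, dword index 4, start bit 0, field width 16 */
	*descp = sdma_build_ahg_descriptor(0x1234, 4, 0, 16);
	/* ... submit AHG-mode requests referencing idx here ... */
	sdma_ahg_free(sde, idx);
	return 0;
}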
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress. This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned int sdma_progress(struct sdma_engine *sde,
					 unsigned int seq,
					 struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (read_seqretry(&sde->head_lock, seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) sde->desc_avail = sdma_descq_freecnt(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (tx->num_desc > sde->desc_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
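
/*
 * Sketch of the intended call pattern in a sleep/wait routine: sample
 * the head seqlock before deciding to block, then use sdma_progress()
 * to close the race with the interrupt handler that advances the head.
 * Hypothetical example_* name.
 */
static inline int example_wait_decision(struct sdma_engine *sde,
					struct sdma_txreq *tx)
{
	unsigned int seq = read_seqbegin(&sde->head_lock);

	if (tx->num_desc <= sdma_descq_freecnt(sde))
		return 0;	/* room now - submit */
	if (sdma_progress(sde, seq, tx))
		return -EAGAIN;	/* head advanced - retry submission */
	return -EBUSY;		/* queue the iowait and sleep */
}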
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
/**
 * sdma_iowait_schedule() - schedule the iowait for progress
 * @sde: sdma_engine doing the scheduling
 * @wait: wait struct to schedule
 *
 * This function schedules the iowait structure embedded in the
 * QP or PQ on the port's workqueue, on the engine's preferred CPU.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static inline void sdma_iowait_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct iowait *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct hfi1_pportdata *ppd = sde->dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* for use by interrupt handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) void sdma_engine_error(struct sdma_engine *sde, u64 status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
/*
 * The diagram below details the relationship of the mapping structures.
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide evenly, the
 * extras are added from the last vl downward; e.g. with num_sdma = 9
 * and num_vls = 4, vls 0-2 get 2 engines each and vl 3 gets 3.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * dd->sdma_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * | sdma_map_elem[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * | +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * v | mask |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * sdma_vl_map |--------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * +--------------------------+ | sde[0] -> eng 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * | list (RCU) | |--------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * |--------------------------| ->| sde[1] -> eng 2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * | mask | --/ |--------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * |--------------------------| -/ | * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * | actual_vls (max 8) | -/ |--------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * |--------------------------| --/ | sde[n-1] -> eng n |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * | vls (max 8) | -/ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * |--------------------------| --/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * | map[0] |-/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * |--------------------------| +---------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * | map[1] |--- | mask |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * |--------------------------| \---- |---------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * | * | \-- | sde[0] -> eng 1+n |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * | * | \---- |---------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * | * | \->| sde[1] -> eng 2+n |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * |--------------------------| |---------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * | map[vls - 1] |- | * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * +--------------------------+ \- |---------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * \- | sde[m-1] -> eng m+n |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * \ +---------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * \-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * \- +----------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * \- | mask |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * \ |----------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * \- | sde[0] -> eng 1+m+n |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * \- |----------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * >| sde[1] -> eng 2+m+n |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * |----------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * | * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * |----------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * | sde[o-1] -> eng o+m+n|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * +----------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
/**
 * struct sdma_map_elem - mapping for a vl
 * @mask: selector mask
 * @sde: array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce an index into the trailing
 * array of sdes.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct sdma_map_elem {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct sdma_engine *sde[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
/**
 * struct sdma_vl_map - parent vl-to-engine mapping structure
 * @engine_to_vl: map of an engine to a vl
 * @list: rcu head for free callback
 * @mask: vl mask to "mod" the vl to produce an index to map array
 * @actual_vls: number of vls
 * @vls: number of vls rounded to next power of 2
 * @map: array of sdma_map_elem entries
 *
 * This is the parent mapping structure. The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct sdma_vl_map {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct rcu_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) u8 actual_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) u8 vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct sdma_map_elem *map[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) };
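
/*
 * Illustrative two-level lookup mirroring the diagram above: the vl
 * mask picks the per-vl element, whose own mask then picks an engine
 * from the trailing array. The real selection code additionally reads
 * the map under rcu_read_lock(); this sketch shows only the index
 * arithmetic. Hypothetical example_* name.
 */
static inline struct sdma_engine *example_map_lookup(struct sdma_vl_map *m,
						     u8 vl, u32 selector)
{
	struct sdma_map_elem *e = m->map[vl & m->mask];

	return e->sde[selector & e->mask];
}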
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int sdma_map_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) u8 num_vls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) u8 *vl_engines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* slow path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) void _sdma_engine_progress_schedule(struct sdma_engine *sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * sdma_engine_progress_schedule() - schedule progress on engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * @sde: sdma_engine to schedule progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * This is the fast path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static inline void sdma_engine_progress_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) _sdma_engine_progress_schedule(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct sdma_engine *sdma_select_engine_sc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u32 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u8 sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct sdma_engine *sdma_select_engine_vl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) u32 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) u8 vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) u32 selector, u8 vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) size_t count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) unsigned long cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #endif

/*
 * Return the basename of a path: the text after the final '/', or the
 * whole string when no '/' is present, e.g. slashstrip("a/b/c") -> "c".
 */
static inline char *slashstrip(char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) char *r = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) while (*s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (*s++ == '/')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) r = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) u16 sdma_get_descq_cnt(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) extern uint mod_num_sdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #endif