^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright(c) 2015 - 2018 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is provided under a dual BSD/GPLv2 license. When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/seqlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include "hfi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include "common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include "qp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include "sdma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include "iowait.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /* must be a power of 2, >= 64 and <= 32768 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define SDMA_DESCQ_CNT 2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define SDMA_DESC_INTR 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define INVALID_TAIL 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) module_param(sdma_descq_cnt, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static uint sdma_idle_cnt = 250;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) module_param(sdma_idle_cnt, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) uint mod_num_sdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static uint sdma_desct_intr = SDMA_DESC_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
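/*
 * Usage note (illustrative; assumes the driver module is named hfi1):
 * the parameters above can be set at module load time, e.g.
 *
 *   modprobe hfi1 sdma_descq_cnt=4096 num_sdma=8 desct_intr=128
 *
 * desct_intr is declared S_IRUGO | S_IWUSR, so it can also be changed
 * by root at runtime through /sys/module/hfi1/parameters/desct_intr.
 */
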
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define SDMA_WAIT_BATCH_SIZE 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /* max wait time for an SDMA engine to indicate it has halted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) /* all SDMA engine errors that cause a halt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define SD(name) SEND_DMA_##name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define ALL_SDMA_ENG_HALT_ERRS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* sdma_sendctrl operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define SDMA_SENDCTRL_OP_ENABLE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define SDMA_SENDCTRL_OP_HALT BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
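
/*
 * These op flags are OR'd together in sdma_set_state(), as dictated by
 * sdma_action_table[] below, and handed to sdma_sendctrl() to program
 * the engine for its new state.
 */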
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) /* handle long defines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static const char * const sdma_state_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) [sdma_state_s00_hw_down] = "s00_HwDown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) [sdma_state_s20_idle] = "s20_Idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) [sdma_state_s99_running] = "s99_Running",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) static const char * const sdma_event_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) [sdma_event_e30_go_running] = "e30_GoRunning",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) [sdma_event_e60_hw_halted] = "e60_HwHalted",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) [sdma_event_e70_go_idle] = "e70_GoIdle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) [sdma_event_e85_link_down] = "e85_LinkDown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) [sdma_event_e90_sw_halted] = "e90_SwHalted",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static const struct sdma_set_state_action sdma_action_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) [sdma_state_s00_hw_down] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) .go_s99_running_tofalse = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) [sdma_state_s10_hw_start_up_halt_wait] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) .op_halt = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) [sdma_state_s15_hw_start_up_clean_wait] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) .op_intenable = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) .op_cleanup = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) [sdma_state_s20_idle] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) .op_intenable = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) [sdma_state_s30_sw_clean_up_wait] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) [sdma_state_s40_hw_clean_up_wait] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) .op_cleanup = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) [sdma_state_s50_hw_halt_wait] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) [sdma_state_s60_idle_halt_wait] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) .go_s99_running_tofalse = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) .op_halt = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) [sdma_state_s80_hw_freeze] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) [sdma_state_s82_freeze_sw_clean] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) .op_enable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) .op_intenable = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) [sdma_state_s99_running] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) .op_enable = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) .op_intenable = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) .op_halt = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) .op_cleanup = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) .go_s99_running_totrue = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) #define SDMA_TAIL_UPDATE_THRESH 0x1F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /* declare all statics here rather than keep sorting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static void sdma_complete(struct kref *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static void sdma_finalput(struct sdma_state *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static void sdma_get(struct sdma_state *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) static void sdma_hw_clean_up_task(struct tasklet_struct *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static void sdma_put(struct sdma_state *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static void sdma_set_state(struct sdma_engine *, enum sdma_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static void sdma_start_hw_clean_up(struct sdma_engine *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static void sdma_sw_clean_up_task(struct tasklet_struct *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static void sdma_sendctrl(struct sdma_engine *, unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static void init_sdma_regs(struct sdma_engine *, u32, uint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static void sdma_process_event(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) enum sdma_events event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static void __sdma_process_event(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) enum sdma_events event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static void dump_sdma_state(struct sdma_engine *sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static void sdma_make_progress(struct sdma_engine *sde, u64 status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) static void sdma_flush_descq(struct sdma_engine *sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * sdma_state_name() - return state string from enum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * @state: state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static const char *sdma_state_name(enum sdma_states state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return sdma_state_names[state];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) static void sdma_get(struct sdma_state *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) kref_get(&ss->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static void sdma_complete(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) struct sdma_state *ss =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) container_of(kref, struct sdma_state, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) complete(&ss->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static void sdma_put(struct sdma_state *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) kref_put(&ss->kref, sdma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) static void sdma_finalput(struct sdma_state *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) sdma_put(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) wait_for_completion(&ss->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
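/*
 * Per-engine CSR accessors: the register is selected by this engine's
 * index (sde->this_idx) plus the register offset within its CSR space.
 */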
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) static inline void write_sde_csr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) u32 offset0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static inline u64 read_sde_csr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) u32 offset0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * sdma engine 'sde' to drop to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) int pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) u64 off = 8 * sde->this_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) int lcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) u64 reg_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) u64 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) reg_prev = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (reg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /* counter is reset if occupancy count changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (reg != reg_prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) lcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (lcnt++ > 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) /* timed out - bounce the link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) __func__, sde->this_idx, (u32)reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) queue_work(dd->pport->link_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) &dd->pport->link_bounce_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * sdma_wait() - wait for packet egress to complete for all SDMA engines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) * and pause for credit return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) void sdma_wait(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) for (i = 0; i < dd->num_sdma; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct sdma_engine *sde = &dd->per_sdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) sdma_wait_for_packet_egress(sde, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
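/*
 * Program the engine's DESC_CNT CSR from cnt; silently skipped on
 * hardware that does not advertise HFI1_HAS_SDMA_TIMEOUT.
 */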
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) reg = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) reg &= SD(DESC_CNT_CNT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) reg <<= SD(DESC_CNT_CNT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) write_sde_csr(sde, SD(DESC_CNT), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
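/*
 * Retire a single tx: clean up its descriptors, invoke its completion
 * callback with res, and drop the submitter's iowait SDMA reference,
 * waking a drainer if this was the last outstanding tx.
 */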
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) static inline void complete_tx(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) /* snapshot wait and complete: the callback may modify or free the tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) struct iowait *wait = tx->wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) callback_t complete = tx->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) trace_hfi1_sdma_out_sn(sde, tx->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (WARN_ON_ONCE(sde->head_sn != tx->sn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) dd_dev_err(sde->dd, "expected %llu got %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) sde->head_sn, tx->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) sde->head_sn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) __sdma_txclean(sde->dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) if (complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) (*complete)(tx, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) if (iowait_sdma_dec(wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) iowait_drain_wakeup(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * Complete all the sdma requests with an SDMA_TXREQ_S_ABORTED status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * Depending on timing there can be txreqs in two places:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * - in the descq ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * - in the flush list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * To avoid ordering issues the descq ring needs to be flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * first followed by the flush list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * This routine is called from two places
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * - From a work queue item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * - Directly from the state machine just before setting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * state to running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * Must be called with head_lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static void sdma_flush(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct sdma_txreq *txp, *txp_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) LIST_HEAD(flushlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) uint seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) /* flush from head to tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) sdma_flush_descq(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) spin_lock_irqsave(&sde->flushlist_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) /* copy flush list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) list_splice_init(&sde->flushlist, &flushlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) spin_unlock_irqrestore(&sde->flushlist_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) /* flush from flush list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) list_for_each_entry_safe(txp, txp_next, &flushlist, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /* wakeup QPs orphaned on the dmawait list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) struct iowait *w, *nw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) seq = read_seqbegin(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) if (!list_empty(&sde->dmawait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) write_seqlock(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (w->wakeup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) w->wakeup(w, SDMA_AVAIL_REASON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) list_del_init(&w->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) write_sequnlock(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) } while (read_seqretry(&sde->waitlock, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * Fields a work request for flushing the descq ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * and the flush list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * If the engine has been brought to running during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * the scheduling delay, the flush is skipped, since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) * transition to running will already have performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * this flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) static void sdma_field_flush(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct sdma_engine *sde =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) container_of(work, struct sdma_engine, flush_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) write_seqlock_irqsave(&sde->head_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (!__sdma_running(sde))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) sdma_flush(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) write_sequnlock_irqrestore(&sde->head_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
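/*
 * Workqueue handler scheduled after a halting error: poll the engine
 * STATUS CSR until the hardware reports it has halted (or give up after
 * SDMA_ERR_HALT_TIMEOUT ms), then advance the state machine with
 * e15_hw_halt_done.
 */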
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) static void sdma_err_halt_wait(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) struct sdma_engine *sde = container_of(work, struct sdma_engine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) u64 statuscsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) statuscsr = read_sde_csr(sde, SD(STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (statuscsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) "SDMA engine %d - timeout waiting for engine to halt\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) sde->this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * Continue anyway. This could happen if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * an uncorrectable error in the wrong spot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) usleep_range(80, 120);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) sdma_process_event(sde, sdma_event_e15_hw_halt_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
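/*
 * On non-Bx hardware with the SDMA_AHG capability set, snapshot the
 * descriptor head of every other engine and arm this engine's
 * progress-check timer so stalled engines can be detected.
 */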
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) unsigned index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) for (index = 0; index < dd->num_sdma; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) struct sdma_engine *curr_sdma = &dd->per_sdma[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (curr_sdma != sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) curr_sdma->progress_check_head =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) curr_sdma->descq_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) "SDMA engine %d - check scheduled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) sde->this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) mod_timer(&sde->err_progress_check_timer, jiffies + 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
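/*
 * Timer callback: any other running engine that still has descriptors
 * queued but whose head has not moved since the snapshot is forced into
 * software halt (e90), then this engine's halt-wait worker is scheduled.
 */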
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) static void sdma_err_progress_check(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) unsigned index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) dd_dev_err(sde->dd, "SDE progress check event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) for (index = 0; index < sde->dd->num_sdma; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) /* check progress on each engine except the current one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (curr_sde == sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * We must disable interrupts when acquiring the lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * to avoid a deadlock if an interrupt triggers and spins on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * the same lock on the same CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) spin_lock_irqsave(&curr_sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) write_seqlock(&curr_sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /* skip non-running queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (curr_sde->state.current_state != sdma_state_s99_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) write_sequnlock(&curr_sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if ((curr_sde->descq_head != curr_sde->descq_tail) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) (curr_sde->descq_head ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) curr_sde->progress_check_head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) __sdma_process_event(curr_sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) sdma_event_e90_sw_halted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) write_sequnlock(&curr_sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) schedule_work(&sde->err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
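/*
 * Tasklet: poll the engine STATUS CSR until the hardware reports that
 * clean-up has completed, then advance the state machine with
 * e25_hw_clean_up_done.
 */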
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) static void sdma_hw_clean_up_task(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) struct sdma_engine *sde = from_tasklet(sde, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) sdma_hw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) u64 statuscsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) sde->this_idx, slashstrip(__FILE__), __LINE__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) statuscsr = read_sde_csr(sde, SD(STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (statuscsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
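/* return the tx at the software tx ring head (NULL if the slot is empty) */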
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) return sde->tx_ring[sde->tx_head & sde->sdma_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * flush ring for recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static void sdma_flush_descq(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) u16 head, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) int progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) struct sdma_txreq *txp = get_txhead(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) /* The reason for some of the complexity of this code is that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * not all descriptors have corresponding txps. So, we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * be able to skip over descs until we wander into the range of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * the next txp on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) head = sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) tail = sde->descq_tail & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) while (head != tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /* advance head, wrap if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) head = ++sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /* if now past this txp's descs, do the callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (txp && txp->next_descq_idx == head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /* remove from list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) trace_hfi1_sdma_progress(sde, head, tail, txp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) txp = get_txhead(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) sdma_desc_avail(sde, sdma_descq_freecnt(sde));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
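/*
 * Tasklet run once the engine has halted: retire everything the hardware
 * completed, flush the rest, reset the software ring state, and signal
 * e40_sw_cleaned.
 */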
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) static void sdma_sw_clean_up_task(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) spin_lock_irqsave(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) write_seqlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * At this point, the following should always be true:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * - We are halted, so no more descriptors are getting retired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * - We are not running, so no one is submitting new work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * - Only we can send the e40_sw_cleaned, so we can't start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * running again until we say so. So, the active list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * descq are ours to play with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * In the error clean up sequence, software clean must be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * before the hardware clean so we can use the hardware head in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * the progress routine. A hardware clean or SPC unfreeze will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * reset the hardware head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * Process all retired requests. The progress routine will use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * latest physical hardware head - we are not running so speed does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * not matter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) sdma_make_progress(sde, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) sdma_flush(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * Reset our notion of head and tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * Note that the HW registers have been reset via an earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * clean up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) sde->descq_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) sde->descq_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) sde->desc_avail = sdma_descq_freecnt(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) *sde->head_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) write_sequnlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) spin_unlock_irqrestore(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
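/*
 * Final software teardown: drop the state machine reference and unblock
 * any thread waiting on unfreeze events.
 */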
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static void sdma_sw_tear_down(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct sdma_state *ss = &sde->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /* Releasing this reference means the state machine has stopped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) sdma_put(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) /* stop waiting for all unfreeze events to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) atomic_set(&sde->dd->sdma_unfreeze_count, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) static void sdma_start_hw_clean_up(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
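/*
 * Transition the state machine to next_state: trace the transition,
 * flush stale submissions when (re-)entering the running state, and
 * program the engine with the control ops from sdma_action_table[].
 */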
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static void sdma_set_state(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) enum sdma_states next_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct sdma_state *ss = &sde->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) const struct sdma_set_state_action *action = sdma_action_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) unsigned op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) trace_hfi1_sdma_state(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) sdma_state_names[ss->current_state],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) sdma_state_names[next_state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* debugging bookkeeping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) ss->previous_state = ss->current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) ss->previous_op = ss->current_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ss->current_state = next_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (ss->previous_state != sdma_state_s99_running &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) next_state == sdma_state_s99_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) sdma_flush(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (action[next_state].op_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) op |= SDMA_SENDCTRL_OP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (action[next_state].op_intenable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) op |= SDMA_SENDCTRL_OP_INTENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (action[next_state].op_halt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) op |= SDMA_SENDCTRL_OP_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (action[next_state].op_cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) op |= SDMA_SENDCTRL_OP_CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (action[next_state].go_s99_running_tofalse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (action[next_state].go_s99_running_totrue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ss->current_op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) sdma_sendctrl(sde, ss->current_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * sdma_get_descq_cnt() - called when the device is probed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * Return a validated descq count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * This is currently only used in the verbs initialization to build the tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * This will probably be deleted in favor of a more scalable approach to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * alloc tx's.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) u16 sdma_get_descq_cnt(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) u16 count = sdma_descq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return SDMA_DESCQ_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /* count must be a power of 2, at least 64 and at most 32768.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * Otherwise return the default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (!is_power_of_2(count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return SDMA_DESCQ_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (count < 64 || count > 32768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return SDMA_DESCQ_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * sdma_engine_get_vl() - return vl for a given sdma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * @sde: sdma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * This function returns the vl mapped to a given engine, or an error if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * the mapping can't be found. The mapping fields are protected by RCU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) int sdma_engine_get_vl(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct sdma_vl_map *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) u8 vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) m = rcu_dereference(dd->sdma_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (unlikely(!m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) vl = m->engine_to_vl[sde->this_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * sdma_select_engine_vl() - select sdma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * @dd: devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * @selector: a spreading factor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * @vl: this vl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * This function returns an engine based on the selector and a vl. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * mapping fields are protected by RCU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct sdma_engine *sdma_select_engine_vl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) u32 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) u8 vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct sdma_vl_map *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct sdma_map_elem *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct sdma_engine *rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* NOTE: This should only happen if SC->VL changed after the initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * checks on the QP/AH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * The default below will return engine 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (vl >= num_vls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) rval = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) m = rcu_dereference(dd->sdma_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (unlikely(!m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return &dd->per_sdma[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) e = m->map[vl & m->mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) rval = e->sde[selector & e->mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) rval = !rval ? &dd->per_sdma[0] : rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * sdma_select_engine_sc() - select sdma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * @dd: devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * @selector: a spreading factor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * @sc5: the 5 bit sc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * This function returns an engine based on the selector and an sc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct sdma_engine *sdma_select_engine_sc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u32 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u8 sc5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) u8 vl = sc_to_vlt(dd, sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return sdma_select_engine_vl(dd, selector, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
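/*
 * Per-cpu user engine affinity state (a summary of the structures below):
 * an sdma_rht_node is hashed on cpu_id and holds one sdma_rht_map_elem per
 * VL; each map element keeps ctr real engine pointers in sde[], replicated
 * up to the next power of 2 so "selector & mask" can pick an entry without
 * a modulo.
 */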
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct sdma_rht_map_elem {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) u8 ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct sdma_engine *sde[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct sdma_rht_node {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned long cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct rhash_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #define NR_CPUS_HINT 192
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static const struct rhashtable_params sdma_rht_params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) .nelem_hint = NR_CPUS_HINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) .head_offset = offsetof(struct sdma_rht_node, node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) .key_offset = offsetof(struct sdma_rht_node, cpu_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) .key_len = sizeof_field(struct sdma_rht_node, cpu_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) .max_size = NR_CPUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) .min_size = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) .automatic_shrinking = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * sdma_select_user_engine() - select sdma engine based on user setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * @dd: devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * @selector: a spreading factor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * @vl: this vl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * This function returns an sdma engine for a user sdma request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * The user-defined sdma engine affinity setting is honored when applicable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * otherwise the system default sdma engine mapping is used. To ensure correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * ordering, the mapping from <selector, vl> to sde must remain unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u32 selector, u8 vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct sdma_rht_node *rht_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct sdma_engine *sde = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) unsigned long cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * To ensure that the same sdma engine(s) are always selected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * make sure the process is pinned to a single CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (current->nr_cpus_allowed != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) cpu_id = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (rht_node && rht_node->map[vl]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct sdma_rht_map_elem *map = rht_node->map[vl];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) sde = map->sde[selector & map->mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return sdma_select_engine_vl(dd, selector, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
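/*
 * Replicate the first ctr engine entries so sde[] is filled out to the next
 * power of 2, allowing "selector & mask" indexing.  Worked example
 * (illustrative): with ctr = 3, entries 0-2 hold the real engines and entry 3
 * repeats entry 0, for a mask of 3.
 */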
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) map->sde[map->ctr + i] = map->sde[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
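/*
 * Remove @sde from @map if present: compact the remaining entries, recompute
 * the power-of-2 mask, and re-replicate the tail entries.
 */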
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned int i, pow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* only need to check the first ctr entries for a match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) for (i = 0; i < map->ctr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (map->sde[i] == sde) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) memmove(&map->sde[i], &map->sde[i + 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) (map->ctr - i - 1) * sizeof(map->sde[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) map->ctr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) pow = roundup_pow_of_two(map->ctr ? : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) map->mask = pow - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) sdma_populate_sde_map(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * Prevents concurrent reads and writes of the sdma engine cpu_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static DEFINE_MUTEX(process_to_sde_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
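/*
 * Usage sketch (assuming the per-engine sysfs cpu_list attribute is wired to
 * this handler; that wiring is not shown here):
 *
 *   echo "0,2,4-7" > /sys/.../sdma<N>/cpu_list
 *
 * parses the cpulist, maps those CPUs to this engine for the engine's VL,
 * and prunes this engine from any CPU no longer in the list.
 */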
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) cpumask_var_t mask, new_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned long cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int ret, vl, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct sdma_rht_node *rht_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) vl = sdma_engine_get_vl(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) free_cpumask_var(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ret = cpulist_parse(buf, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (!cpumask_subset(mask, cpu_online_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dd_dev_warn(sde->dd, "Invalid CPU mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) sz = sizeof(struct sdma_rht_map_elem) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) mutex_lock(&process_to_sde_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) for_each_cpu(cpu, mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* Check if we have this already mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) cpumask_set_cpu(cpu, new_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!rht_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!rht_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (!rht_node->map[vl]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) kfree(rht_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) rht_node->cpu_id = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rht_node->map[vl]->mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) rht_node->map[vl]->ctr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) rht_node->map[vl]->sde[0] = sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ret = rhashtable_insert_fast(dd->sdma_rht,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) &rht_node->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) kfree(rht_node->map[vl]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) kfree(rht_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) int ctr, pow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Add new user mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!rht_node->map[vl])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!rht_node->map[vl]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) rht_node->map[vl]->ctr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ctr = rht_node->map[vl]->ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) rht_node->map[vl]->sde[ctr - 1] = sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) pow = roundup_pow_of_two(ctr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) rht_node->map[vl]->mask = pow - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* Populate the sde map table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sdma_populate_sde_map(rht_node->map[vl]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) cpumask_set_cpu(cpu, new_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* Clean up old mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) for_each_cpu(cpu, cpu_online_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct sdma_rht_node *rht_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* Don't cleanup sdes that are set in the new mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (cpumask_test_cpu(cpu, mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (rht_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) bool empty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* Remove mappings for old sde */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (rht_node->map[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) sdma_cleanup_sde_map(rht_node->map[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* Free empty hash table entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!rht_node->map[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (rht_node->map[i]->ctr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) empty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ret = rhashtable_remove_fast(dd->sdma_rht,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) &rht_node->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) kfree(rht_node->map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) kfree(rht_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) cpumask_copy(&sde->cpu_mask, new_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) mutex_unlock(&process_to_sde_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) free_cpumask_var(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) free_cpumask_var(new_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return ret ? : strnlen(buf, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mutex_lock(&process_to_sde_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (cpumask_empty(&sde->cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) snprintf(buf, PAGE_SIZE, "%s\n", "empty");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) mutex_unlock(&process_to_sde_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return strnlen(buf, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
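/*
 * rhashtable free callback: releases the per-VL map elements and the node
 * itself.  Invoked for each remaining node by rhashtable_free_and_destroy()
 * in sdma_clean().
 */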
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void sdma_rht_free(void *ptr, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct sdma_rht_node *rht_node = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) kfree(rht_node->map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) kfree(rht_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
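/*
 * An example of the seq_file line format produced below (illustrative):
 *   "cpu  4:  vl0: [ sdma 0, sdma 1 ]"
 */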
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * @s: seq file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * @cpuid: cpu id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * This routine dumps the process-to-sde mappings per cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) void sdma_seqfile_dump_cpu_list(struct seq_file *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) unsigned long cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct sdma_rht_node *rht_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (!rht_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) seq_printf(s, "cpu%3lu: ", cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (!rht_node->map[i] || !rht_node->map[i]->ctr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) seq_printf(s, " vl%d: [", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) for (j = 0; j < rht_node->map[i]->ctr; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!rht_node->map[i]->sde[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (j > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) seq_puts(s, ",");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) seq_printf(s, " sdma%2d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) rht_node->map[i]->sde[j]->this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) seq_puts(s, " ]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Free the indicated map struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static void sdma_map_free(struct sdma_vl_map *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) for (i = 0; m && i < m->actual_vls; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) kfree(m->map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) kfree(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * Handle RCU callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static void sdma_map_rcu_callback(struct rcu_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) sdma_map_free(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * sdma_map_init() - called when the number of vls changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * @port: port number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * @num_vls: number of vls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * @vl_engines: per vl engine mapping (optional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * This routine changes the mapping based on the number of vls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * vl_engines is used to specify a non-uniform vl/engine loading. NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * implies auto-computing the loading and giving each VL a uniform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * distribution of engines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * The auto algorithm computes the sde_per_vl and the number of extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * engines. Any extra engines are added from the last VL on down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * rcu locking is used here to control access to the mapping fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * If either the num_vls or num_sdma are non-power of 2, the array sizes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * up to the next highest power of 2 and the first entry is reused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * in a round robin fashion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * If an error occurs the map change is not done and the mapping is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * not changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int extra, sde_per_vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int engine = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) u8 lvl_engines[OPA_MAX_VLS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct sdma_vl_map *oldmap, *newmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (!(dd->flags & HFI1_HAS_SEND_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!vl_engines) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* truncate divide */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) sde_per_vl = dd->num_sdma / num_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /* extras */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) extra = dd->num_sdma % num_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) vl_engines = lvl_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* add extras from last vl down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) for (i = num_vls - 1; i >= 0; i--, extra--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
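	/*
	 * Worked example of the auto computation (illustrative): with
	 * num_sdma = 16 and num_vls = 5, sde_per_vl = 3 and extra = 1, so only
	 * the last VL receives an extra engine: vl_engines = { 3, 3, 3, 3, 4 }.
	 */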
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* build new map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) newmap = kzalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) sizeof(struct sdma_vl_map) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) roundup_pow_of_two(num_vls) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) sizeof(struct sdma_map_elem *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!newmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) newmap->actual_vls = num_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) newmap->vls = roundup_pow_of_two(num_vls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) newmap->mask = (1 << ilog2(newmap->vls)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* initialize back-map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) newmap->engine_to_vl[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (i = 0; i < newmap->vls; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* save for wrap around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) int first_engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (i < newmap->actual_vls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) int sz = roundup_pow_of_two(vl_engines[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /* only allocate once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) newmap->map[i] = kzalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) sizeof(struct sdma_map_elem) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) sz * sizeof(struct sdma_engine *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (!newmap->map[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* assign engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) for (j = 0; j < sz; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) newmap->map[i]->sde[j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) &dd->per_sdma[engine];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (++engine >= first_engine + vl_engines[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* wrap back to first engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) engine = first_engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* assign back-map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (j = 0; j < vl_engines[i]; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) newmap->engine_to_vl[first_engine + j] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* just re-use entry without allocating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) newmap->map[i] = newmap->map[i % num_vls];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) engine = first_engine + vl_engines[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* newmap in hand, save old map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) spin_lock_irq(&dd->sde_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) oldmap = rcu_dereference_protected(dd->sdma_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) lockdep_is_held(&dd->sde_map_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* publish newmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) rcu_assign_pointer(dd->sdma_map, newmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) spin_unlock_irq(&dd->sde_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* success, free any old map after grace period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (oldmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) call_rcu(&oldmap->list, sdma_map_rcu_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* free any partial allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) sdma_map_free(newmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * sdma_clean() - Clean up allocated memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * @dd: struct hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * @num_engines: num sdma engines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * This routine can be called regardless of the success of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * sdma_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (dd->sdma_pad_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) (void *)dd->sdma_pad_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) dd->sdma_pad_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) dd->sdma_pad_dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) dd->sdma_pad_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (dd->sdma_heads_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) (void *)dd->sdma_heads_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) dd->sdma_heads_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dd->sdma_heads_dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) dd->sdma_heads_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) for (i = 0; dd->per_sdma && i < num_engines; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sde = &dd->per_sdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) sde->head_dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) sde->head_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (sde->descq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dma_free_coherent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) &dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) sde->descq_cnt * sizeof(u64[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) sde->descq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) sde->descq_phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) sde->descq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) sde->descq_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) kvfree(sde->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) sde->tx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) spin_lock_irq(&dd->sde_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) sdma_map_free(rcu_access_pointer(dd->sdma_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) RCU_INIT_POINTER(dd->sdma_map, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) spin_unlock_irq(&dd->sde_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) kfree(dd->per_sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) dd->per_sdma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (dd->sdma_rht) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) kfree(dd->sdma_rht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) dd->sdma_rht = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * sdma_init() - called when device probed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * @port: port number (currently only zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * Initializes each sde and its csrs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * Interrupts are not required to be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * 0 - success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int sdma_init(struct hfi1_devdata *dd, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unsigned this_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct rhashtable *tmp_sdma_rht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) u16 descq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) void *curr_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct hfi1_pportdata *ppd = dd->pport + port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u32 per_sdma_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) uint idle_cnt = sdma_idle_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) size_t num_engines = chip_sdma_engines(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (!HFI1_CAP_IS_KSET(SDMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) HFI1_CAP_CLEAR(SDMA_AHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (mod_num_sdma &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* can't exceed chip support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) mod_num_sdma <= chip_sdma_engines(dd) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* count must be >= vls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) mod_num_sdma >= num_vls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) num_engines = mod_num_sdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) chip_sdma_mem_size(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) per_sdma_credits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /* set up freeze waitqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) init_waitqueue_head(&dd->sdma_unfreeze_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) atomic_set(&dd->sdma_unfreeze_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) descq_cnt = sdma_get_descq_cnt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) num_engines, descq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* alloc memory for array of send engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) GFP_KERNEL, dd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!dd->per_sdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) idle_cnt = ns_to_cclock(dd, idle_cnt);
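	/*
	 * As suggested by the flag names (the hardware documentation is
	 * authoritative): a nonzero idle timeout defaults descriptors to
	 * head-to-host write-back, otherwise each descriptor requests a
	 * completion interrupt.
	 */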
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (idle_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) dd->default_desc1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) SDMA_DESC1_HEAD_TO_HOST_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dd->default_desc1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) SDMA_DESC1_INT_REQ_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (!sdma_desct_intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) sdma_desct_intr = SDMA_DESC_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* Allocate memory for SendDMA descriptor FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) for (this_idx = 0; this_idx < num_engines; ++this_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) sde = &dd->per_sdma[this_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) sde->dd = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) sde->ppd = ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) sde->this_idx = this_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) sde->descq_cnt = descq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) sde->desc_avail = sdma_descq_freecnt(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) sde->sdma_shift = ilog2(descq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) sde->sdma_mask = (1 << sde->sdma_shift) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /* Create a mask specifically for each interrupt source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* Create a combined mask to cover all 3 interrupt sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) sde->imask = sde->int_mask | sde->progress_mask |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sde->idle_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) spin_lock_init(&sde->tail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) seqlock_init(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) spin_lock_init(&sde->senddmactrl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) spin_lock_init(&sde->flushlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) seqlock_init(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /* ensure there is always a zero bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) sde->ahg_bits = 0xfffffffe00000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* set up reference counting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) kref_init(&sde->state.kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) init_completion(&sde->state.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) INIT_LIST_HEAD(&sde->flushlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) INIT_LIST_HEAD(&sde->dmawait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) sde->tail_csr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) tasklet_setup(&sde->sdma_hw_clean_up_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) sdma_hw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) tasklet_setup(&sde->sdma_sw_clean_up_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) INIT_WORK(&sde->flush_worker, sdma_field_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) sde->progress_check_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) timer_setup(&sde->err_progress_check_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) sdma_err_progress_check, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
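		/* each hardware descriptor is two u64 qwords (16 bytes) */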
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) descq_cnt * sizeof(u64[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) &sde->descq_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!sde->descq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) sde->tx_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) kvzalloc_node(array_size(descq_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sizeof(struct sdma_txreq *)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) GFP_KERNEL, dd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!sde->tx_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* Allocate memory for DMA of head registers to memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) dd->sdma_heads_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) &dd->sdma_heads_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (!dd->sdma_heads_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /* Allocate memory for pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) &dd->sdma_pad_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (!dd->sdma_pad_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* assign each engine to different cacheline and init registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) curr_head = (void *)dd->sdma_heads_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) for (this_idx = 0; this_idx < num_engines; ++this_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) unsigned long phys_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) sde = &dd->per_sdma[this_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) sde->head_dma = curr_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) curr_head += L1_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) phys_offset = (unsigned long)sde->head_dma -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) (unsigned long)dd->sdma_heads_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) sde->head_phys = dd->sdma_heads_phys + phys_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) init_sdma_regs(sde, per_sdma_credits, idle_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) dd->flags |= HFI1_HAS_SEND_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dd->num_sdma = num_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (!tmp_sdma_rht) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) kfree(tmp_sdma_rht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) dd->sdma_rht = tmp_sdma_rht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) sdma_clean(dd, num_engines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * sdma_all_running() - called when the link goes up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * This routine moves all engines to the running state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) void sdma_all_running(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* move all engines to running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) for (i = 0; i < dd->num_sdma; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) sde = &dd->per_sdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) sdma_process_event(sde, sdma_event_e30_go_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * sdma_all_idle() - called when the link goes down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * This routine moves all engines to the idle state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) void sdma_all_idle(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* idle all engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) for (i = 0; i < dd->num_sdma; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) sde = &dd->per_sdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) sdma_process_event(sde, sdma_event_e70_go_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * sdma_start() - called to kick off state processing for all engines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * This routine is for kicking off the state processing for all required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * sdma engines. Interrupts need to be working at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) void sdma_start(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /* kick off the engines state processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) for (i = 0; i < dd->num_sdma; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) sde = &dd->per_sdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) sdma_process_event(sde, sdma_event_e10_go_hw_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * sdma_exit() - used when module is removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * @dd: hfi1_devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) void sdma_exit(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned this_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ++this_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) sde = &dd->per_sdma[this_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (!list_empty(&sde->dmawait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) sde->this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) sdma_process_event(sde, sdma_event_e00_go_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) del_timer_sync(&sde->err_progress_check_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * This waits for the state machine to exit so it is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * necessary to kill the sdma_sw_clean_up_task to make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * it is not running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) sdma_finalput(&sde->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * unmap the indicated descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static inline void sdma_unmap_desc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct sdma_desc *descp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) switch (sdma_mapping_type(descp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) case SDMA_MAP_SINGLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) dma_unmap_single(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) &dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) sdma_mapping_addr(descp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) sdma_mapping_len(descp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) case SDMA_MAP_PAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) dma_unmap_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) &dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) sdma_mapping_addr(descp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) sdma_mapping_len(descp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * return the mode as indicated by the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * descriptor in the tx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static inline u8 ahg_mode(struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) >> SDMA_DESC1_HEADER_MODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * @dd: hfi1_devdata for unmapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * @tx: tx request to clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * This is used in the progress routine to clean the tx or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * by the ULP to toss an in-process tx build.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * The code can be called multiple times without issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) void __sdma_txclean(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (tx->num_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) u8 skip = 0, mode = ahg_mode(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /* unmap first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) sdma_unmap_desc(dd, &tx->descp[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /* determine number of AHG descriptors to skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (mode > SDMA_AHG_APPLY_UPDATE1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) skip = mode >> 1;
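		/*
		 * The skipped entries are AHG header-update descriptors that
		 * carry inline header edits rather than a mapped buffer, so
		 * unmapping resumes at the first payload descriptor after them.
		 */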
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) for (i = 1 + skip; i < tx->num_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) sdma_unmap_desc(dd, &tx->descp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) tx->num_desc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) kfree(tx->coalesce_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) tx->coalesce_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /* kmalloc'ed descp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) tx->desc_limit = ARRAY_SIZE(tx->descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) kfree(tx->descp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static inline u16 sdma_gethead(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) int use_dmahead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) u16 hwhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
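	/*
	 * Prefer the head value the engine DMAs to host memory when the
	 * capability and hardware support it and the engine is running;
	 * otherwise (or after a failed sanity check below) fall back to
	 * the slower head CSR read.
	 */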
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) hwhead = use_dmahead ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) (u16)le64_to_cpu(*sde->head_dma) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) (u16)read_sde_csr(sde, SD(HEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) u16 cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) u16 swtail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) u16 swhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) int sane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) swhead = sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /* this code is really bad for cache line trading */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) cnt = sde->descq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
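		/*
		 * Sanity-check hwhead against the software head and tail.
		 * For example, with cnt = 4, swhead = 3 and swtail = 1
		 * (wrapped), only a hwhead of 3, 0 or 1 is sane; anything
		 * else means the value read above cannot be trusted.
		 */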
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (swhead < swtail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /* not wrapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) sane = (hwhead >= swhead) & (hwhead <= swtail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) else if (swhead > swtail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /* wrapped around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) (hwhead <= swtail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /* empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) sane = (hwhead == swhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (unlikely(!sane)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) use_dmahead ? "dma" : "kreg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) hwhead, swhead, swtail, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (use_dmahead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) /* try one more time, using csr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) use_dmahead = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /* proceed as if no progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) hwhead = swhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return hwhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * This is called when there are send DMA descriptors that might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * This is called with head_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct iowait *wait, *nw, *twait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) uint i, n = 0, seq, tidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) dd_dev_err(sde->dd, "avail: %u\n", avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
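	/*
	 * Peek at the wait list under a lockless seqlock read first and only
	 * take the write side when there is at least one waiter to harvest;
	 * if the read raced with a writer, retry it.
	 */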
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) seq = read_seqbegin(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (!list_empty(&sde->dmawait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /* at least one item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) write_seqlock(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /* Harvest waiters wanting DMA descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) list_for_each_entry_safe(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) nw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) &sde->dmawait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) u32 num_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (!wait->wakeup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (n == ARRAY_SIZE(waits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) iowait_init_priority(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) num_desc = iowait_get_all_desc(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (num_desc > avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) avail -= num_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			/* Find the top-priority wait member */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) twait = waits[tidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) tidx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) iowait_priority_update_top(wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) twait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) tidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) list_del_init(&wait->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) waits[n++] = wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) write_sequnlock(&sde->waitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) } while (read_seqretry(&sde->waitlock, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) /* Schedule the top-priority entry first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (i != tidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /* head_lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static void sdma_make_progress(struct sdma_engine *sde, u64 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct sdma_txreq *txp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) u16 hwhead, swhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) int idle_check_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) hwhead = sdma_gethead(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /* The reason for some of the complexity of this code is that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * not all descriptors have corresponding txps. So, we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * be able to skip over descs until we wander into the range of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * the next txp on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
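	/*
	 * For example, a txreq that used three descriptors completes only
	 * once swhead has stepped past all three and reaches the txreq's
	 * next_descq_idx; descriptors in between are simply skipped over.
	 */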
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) txp = get_txhead(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) swhead = sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) while (swhead != hwhead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* advance head, wrap if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) swhead = ++sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* if now past this txp's descs, do the callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (txp && txp->next_descq_idx == swhead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /* remove from list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) complete_tx(sde, txp, SDMA_TXREQ_S_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /* see if there is another txp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) txp = get_txhead(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * The SDMA idle interrupt is not guaranteed to be ordered with respect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	 * to updates to the dma_head location in host memory. The head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * value read might not be fully up to date. If there are pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * descriptors and the SDMA idle interrupt fired then read from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * CSR SDMA head instead to get the latest value from the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * The hardware SDMA head should be read at most once in this invocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	 * of sdma_make_progress(), which is ensured by the idle_check_done flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if ((status & sde->idle_mask) && !idle_check_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) u16 swtail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (swtail != hwhead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) hwhead = (u16)read_sde_csr(sde, SD(HEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) idle_check_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) sde->last_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) sdma_desc_avail(sde, sdma_descq_freecnt(sde));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  * sdma_engine_interrupt() - interrupt handler for engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)  * @sde: sdma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * @status: sdma interrupt reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)  * Status is a mask of the 3 possible interrupts for this engine. It will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)  * contain bits _only_ for this SDMA engine. It will contain at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)  * bit; it may contain more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) trace_hfi1_sdma_engine_interrupt(sde, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) write_seqlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) sdma_set_desc_cnt(sde, sdma_desct_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (status & sde->idle_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sde->idle_int_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) else if (status & sde->progress_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) sde->progress_int_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) else if (status & sde->int_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) sde->sdma_int_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) sdma_make_progress(sde, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) write_sequnlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * sdma_engine_error() - error handler for engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * @sde: sdma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * @status: sdma interrupt reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) void sdma_engine_error(struct sdma_engine *sde, u64 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) (unsigned long long)status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) sdma_state_names[sde->state.current_state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) spin_lock_irqsave(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) write_seqlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (status & ALL_SDMA_ENG_HALT_ERRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) __sdma_process_event(sde, sdma_event_e60_hw_halted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) "SDMA (%u) engine error: 0x%llx state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) (unsigned long long)status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) sdma_state_names[sde->state.current_state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) dump_sdma_state(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) write_sequnlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) spin_unlock_irqrestore(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) u64 set_senddmactrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) u64 clr_senddmactrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (op & SDMA_SENDCTRL_OP_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (op & SDMA_SENDCTRL_OP_INTENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (op & SDMA_SENDCTRL_OP_HALT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) spin_lock_irqsave(&sde->senddmactrl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) sde->p_senddmactrl |= set_senddmactrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) sde->p_senddmactrl &= ~clr_senddmactrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (op & SDMA_SENDCTRL_OP_CLEANUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) write_sde_csr(sde, SD(CTRL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) sde->p_senddmactrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) SD(CTRL_SDMA_CLEANUP_SMASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) sdma_dumpstate(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) static void sdma_setlengen(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * Set SendDmaLenGen and clear-then-set the MSB of the generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * count to enable generation checking and load the internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * generation counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) write_sde_csr(sde, SD(LEN_GEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) write_sde_csr(sde, SD(LEN_GEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) /* Commit writes to memory and advance the tail on the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) smp_wmb(); /* see get_txhead() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) writeq(tail, sde->tail_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) * This is called when changing to state s10_hw_start_up_halt_wait as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * a result of send buffer errors or send DMA descriptor errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) static void sdma_hw_start_up(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) sdma_setlengen(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) sdma_update_tail(sde, 0); /* Set SendDmaTail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) *sde->head_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * set_sdma_integrity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) static void set_sdma_integrity(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) write_sde_csr(sde, SD(CHECK_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) hfi1_pkt_base_sdma_integrity(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static void init_sdma_regs(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) u32 credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) uint idle_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) u8 opval, opmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) sdma_setlengen(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) sdma_update_tail(sde, 0); /* Set SendDmaTail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) write_sde_csr(sde, SD(DESC_CNT), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) write_sde_csr(sde, SD(MEMORY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) ((u64)(credits * sde->this_idx) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) set_sdma_integrity(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) opmask = OPCODE_CHECK_MASK_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) opval = OPCODE_CHECK_VAL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) write_sde_csr(sde, SD(CHECK_OPCODE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) #define sdma_dumpstate_helper0(reg) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) csr = read_csr(sde->dd, reg); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) #define sdma_dumpstate_helper(reg) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) csr = read_sde_csr(sde, reg); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) #reg, sde->this_idx, csr); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) #define sdma_dumpstate_helper2(reg) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) csr = read_csr(sde->dd, reg + (8 * i)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) #reg, i, csr); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) void sdma_dumpstate(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) u64 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) sdma_dumpstate_helper(SD(CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) sdma_dumpstate_helper(SD(STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) sdma_dumpstate_helper0(SD(ERR_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) sdma_dumpstate_helper0(SD(ERR_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) sdma_dumpstate_helper(SD(ENG_ERR_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) sdma_dumpstate_helper2(CCE_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) sdma_dumpstate_helper2(CCE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) sdma_dumpstate_helper2(CCE_INT_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) sdma_dumpstate_helper(SD(TAIL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) sdma_dumpstate_helper(SD(HEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) sdma_dumpstate_helper(SD(PRIORITY_THLD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) sdma_dumpstate_helper(SD(IDLE_CNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) sdma_dumpstate_helper(SD(RELOAD_CNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) sdma_dumpstate_helper(SD(DESC_CNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) sdma_dumpstate_helper(SD(MEMORY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) sdma_dumpstate_helper0(SD(ENGINES));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) sdma_dumpstate_helper0(SD(MEM_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) sdma_dumpstate_helper(SD(BASE_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) sdma_dumpstate_helper(SD(LEN_GEN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) sdma_dumpstate_helper(SD(HEAD_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) sdma_dumpstate_helper(SD(CHECK_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) sdma_dumpstate_helper(SD(CHECK_VL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) sdma_dumpstate_helper(SD(CHECK_SLID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) sdma_dumpstate_helper(SD(CHECK_OPCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) static void dump_sdma_state(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct hw_sdma_desc *descqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) u64 desc[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) u8 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) u16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) u16 head, tail, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) head = sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) tail = sde->descq_tail & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) cnt = sdma_descq_freecnt(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) sde->this_idx, head, tail, cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) !list_empty(&sde->flushlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /* print info for each entry in the descriptor queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) while (head != tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) char flags[6] = { 'x', 'x', 'x', 'x', 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) descqp = &sde->descq[head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) desc[0] = le64_to_cpu(descqp->qw[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) desc[1] = le64_to_cpu(descqp->qw[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 'H' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) & SDMA_DESC0_PHY_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) & SDMA_DESC1_GENERATION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) & SDMA_DESC0_BYTE_COUNT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) head, flags, addr, gen, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) "\tdesc0:0x%016llx desc1 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) desc[0], desc[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) dd_dev_err(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) "\taidx: %u amode: %u alen: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) (u8)((desc[1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) SDMA_DESC1_HEADER_INDEX_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) SDMA_DESC1_HEADER_INDEX_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) (u8)((desc[1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) SDMA_DESC1_HEADER_MODE_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) SDMA_DESC1_HEADER_MODE_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) (u8)((desc[1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) SDMA_DESC1_HEADER_DWS_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) SDMA_DESC1_HEADER_DWS_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) head &= sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) #define SDE_FMT \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * sdma_seqfile_dump_sde() - debugfs dump of sde
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) * @s: seq file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) * @sde: send dma engine to dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) * This routine dumps the sde to the indicated seq file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) u16 head, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) struct hw_sdma_desc *descqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) u64 desc[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) u8 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) u16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) head = sde->descq_head & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) seq_printf(s, SDE_FMT, sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) sde->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) sdma_state_name(sde->state.current_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) (unsigned long long)read_sde_csr(sde, SD(CTRL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) (unsigned long long)read_sde_csr(sde, SD(STATUS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) (unsigned long long)le64_to_cpu(*sde->head_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) (unsigned long long)sde->last_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) (unsigned long long)sde->ahg_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) sde->tx_tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) sde->tx_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) sde->descq_tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) sde->descq_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) !list_empty(&sde->flushlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) sde->descq_full_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) /* print info for each entry in the descriptor queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) while (head != tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) char flags[6] = { 'x', 'x', 'x', 'x', 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) descqp = &sde->descq[head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) desc[0] = le64_to_cpu(descqp->qw[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) desc[1] = le64_to_cpu(descqp->qw[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 'H' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) & SDMA_DESC0_PHY_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) & SDMA_DESC1_GENERATION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) & SDMA_DESC0_BYTE_COUNT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) seq_printf(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) head, flags, addr, gen, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) (u8)((desc[1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) SDMA_DESC1_HEADER_INDEX_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) SDMA_DESC1_HEADER_INDEX_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) (u8)((desc[1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) SDMA_DESC1_HEADER_MODE_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) SDMA_DESC1_HEADER_MODE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) head = (head + 1) & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) * add the generation number into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * the qw1 and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
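	/*
	 * Assuming sdma_shift is log2 of the descriptor queue size, the two
	 * bits above count how many times descq_tail has wrapped, which is
	 * the generation value the hardware expects in each descriptor.
	 */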
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) << SDMA_DESC1_GENERATION_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return qw1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * This routine submits the indicated tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * Space has already been guaranteed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * tail side of ring is locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * The hardware tail update is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) * in the caller and that is facilitated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * by returning the new tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * There is special case logic for ahg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * to not add the generation number for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * up to 2 descriptors that follow the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * first descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) u16 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) struct sdma_desc *descp = tx->descp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) u8 skip = 0, mode = ahg_mode(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) tail = sde->descq_tail & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) tail, &sde->descq[tail]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) tail = ++sde->descq_tail & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) descp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) if (mode > SDMA_AHG_APPLY_UPDATE1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) skip = mode >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) for (i = 1; i < tx->num_desc; i++, descp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) u64 qw1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) /* edits don't have generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) qw1 = descp->qw[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) skip--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) /* replace generation with real one for non-edits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) qw1 = add_gen(sde, descp->qw[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) sde->descq[tail].qw[1] = cpu_to_le64(qw1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) tail, &sde->descq[tail]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) tail = ++sde->descq_tail & sde->sdma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) tx->next_descq_idx = tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) tx->sn = sde->tail_sn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) trace_hfi1_sdma_in_sn(sde, tx->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) sde->desc_avail -= tx->num_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) return tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * Check for progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) static int sdma_check_progress(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) struct iowait_work *wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) bool pkts_sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) sde->desc_avail = sdma_descq_freecnt(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (tx->num_desc <= sde->desc_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return -EAGAIN;
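	/*
	 * -EAGAIN means enough descriptors have been freed since the caller
	 * last checked, so the caller (e.g. sdma_send_txreq()) can simply
	 * retry the submit.
	 */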
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* pulse the head_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (wait && iowait_ioww_to_iow(wait)->sleep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) seq = raw_seqcount_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) (const seqcount_t *)&sde->head_lock.seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) sde->desc_avail = sdma_descq_freecnt(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * sdma_send_txreq() - submit a tx req to ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * @sde: sdma engine to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * @wait: SE wait structure to use when full (may be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) * @tx: sdma_txreq to submit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * @pkts_sent: has any packet been sent yet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * The call submits the tx into the ring. If an iowait structure is non-NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * the packet will be queued to the list in wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * 0 - Success, -EINVAL - sdma_txreq incomplete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * -EBUSY - no space in ring (wait == NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) int sdma_send_txreq(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) struct iowait_work *wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) bool pkts_sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) u16 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) /* user should have supplied entire packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (unlikely(tx->tlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) tx->wait = iowait_ioww_to_iow(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) spin_lock_irqsave(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (unlikely(!__sdma_running(sde)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) goto unlock_noconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (unlikely(tx->num_desc > sde->desc_avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) goto nodesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) tail = submit_tx(sde, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) iowait_sdma_inc(iowait_ioww_to_iow(wait));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) sdma_update_tail(sde, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) spin_unlock_irqrestore(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) unlock_noconn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) iowait_sdma_inc(iowait_ioww_to_iow(wait));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) tx->next_descq_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) tx->sn = sde->tail_sn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) trace_hfi1_sdma_in_sn(sde, tx->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) spin_lock(&sde->flushlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) list_add_tail(&tx->list, &sde->flushlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) spin_unlock(&sde->flushlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) iowait_inc_wait_count(wait, tx->num_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) ret = -ECOMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) nodesc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) ret = sdma_check_progress(sde, wait, tx, pkts_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) sde->descq_full_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
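
/*
 * Example (illustrative sketch only; "wait", "tx" and "pkts_sent" stand for
 * whatever state the caller maintains and are not taken from an in-tree user):
 *
 *	ret = sdma_send_txreq(sde, wait, &tx->txreq, pkts_sent);
 *
 * On 0 the descriptors are on the ring and the tail has been written to
 * hardware.  -EIOCBQUEUED means the ring was full and the tx was queued to
 * the iowait.  -ECOMM means the engine is not running and the tx was moved
 * to the engine's flush list.  -EBUSY is returned when the ring is full and
 * no iowait was supplied, and -EINVAL when tx->tlen != 0, i.e. the packet
 * was not completely built.
 */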
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * sdma_send_txlist() - submit a list of tx req to ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * @sde: sdma engine to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * @wait: SE wait structure to use when full (may be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * @tx_list: list of sdma_txreqs to submit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) * @count_out: pointer to a u16 which, after return, will contain the total
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * number of sdma_txreqs removed from the tx_list. This includes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * sdma_txreqs whose SDMA descriptors were submitted to the ring and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) * sdma_txreqs that were added to the SDMA engine flush list because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) * SDMA engine state is not running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * The call submits the list into the ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * If the iowait structure is non-NULL and tx_list is not the iowait's own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) * list, the unprocessed part of the list will be appended to the list in wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) * In all cases, the tx_list will be updated so the head of the tx_list is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) * the list of descriptors that have yet to be transmitted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) * The intent of this call is to provide a more efficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) * way of submitting multiple packets to SDMA while taking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) * tail-side lock only once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * 0 - Success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) struct list_head *tx_list, u16 *count_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) struct sdma_txreq *tx, *tx_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) u16 tail = INVALID_TAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) u32 submit_count = 0, flush_count = 0, total_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) spin_lock_irqsave(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) list_for_each_entry_safe(tx, tx_next, tx_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) tx->wait = iowait_ioww_to_iow(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (unlikely(!__sdma_running(sde)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) goto unlock_noconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (unlikely(tx->num_desc > sde->desc_avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) goto nodesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (unlikely(tx->tlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) goto update_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) list_del_init(&tx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) tail = submit_tx(sde, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) submit_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) if (tail != INVALID_TAIL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) sdma_update_tail(sde, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) tail = INVALID_TAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) update_tail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) total_count = submit_count + flush_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) iowait_starve_clear(submit_count > 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) iowait_ioww_to_iow(wait));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (tail != INVALID_TAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) sdma_update_tail(sde, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) spin_unlock_irqrestore(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) *count_out = total_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) unlock_noconn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) spin_lock(&sde->flushlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) list_for_each_entry_safe(tx, tx_next, tx_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) tx->wait = iowait_ioww_to_iow(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) list_del_init(&tx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) tx->next_descq_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) tx->sn = sde->tail_sn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) trace_hfi1_sdma_in_sn(sde, tx->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) list_add_tail(&tx->list, &sde->flushlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) flush_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) iowait_inc_wait_count(wait, tx->num_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) spin_unlock(&sde->flushlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) ret = -ECOMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) goto update_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) nodesc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) sde->descq_full_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) goto update_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
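
/*
 * Example (illustrative sketch; the txreqs on "txlist" are assumed to have
 * been fully built by the caller, e.g. with sdma_txinit()/sdma_txadd_*(),
 * and "wait" is whatever iowait_work the caller uses):
 *
 *	LIST_HEAD(txlist);
 *	u16 count = 0;
 *	int ret;
 *
 *	... list_add_tail(&tx->txreq.list, &txlist) for each packet ...
 *	ret = sdma_send_txlist(sde, wait, &txlist, &count);
 *
 * After the call, "count" holds how many txreqs were consumed from txlist
 * (submitted to the ring or, on -ECOMM, moved to the flush list); anything
 * still on txlist was neither submitted nor flushed.
 */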
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) spin_lock_irqsave(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) write_seqlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) __sdma_process_event(sde, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if (sde->state.current_state == sdma_state_s99_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) sdma_desc_avail(sde, sdma_descq_freecnt(sde));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) write_sequnlock(&sde->head_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) spin_unlock_irqrestore(&sde->tail_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) static void __sdma_process_event(struct sdma_engine *sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) enum sdma_events event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) struct sdma_state *ss = &sde->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) int need_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) /* CONFIG SDMA temporary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) sdma_state_names[ss->current_state],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) sdma_event_names[event]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) switch (ss->current_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) case sdma_state_s00_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) * If down, but running was requested (usually the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) * result of a link up), then we need to start up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) * This can happen when hw down is requested while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) * bringing the link up with traffic active on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) * e.g., the 7220.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) fallthrough; /* and start dma engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /* This reference means the state machine is started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) sdma_get(&sde->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) sdma_set_state(sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) sdma_state_s10_hw_start_up_halt_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) sdma_sw_tear_down(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) case sdma_state_s10_hw_start_up_halt_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) sdma_sw_tear_down(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) sdma_set_state(sde,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) sdma_state_s15_hw_start_up_clean_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) sdma_start_hw_clean_up(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) schedule_work(&sde->err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) case sdma_state_s15_hw_start_up_clean_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) sdma_sw_tear_down(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) sdma_hw_start_up(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) sdma_set_state(sde, ss->go_s99_running ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) sdma_state_s99_running :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) sdma_state_s20_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) case sdma_state_s20_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) sdma_sw_tear_down(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) sdma_set_state(sde, sdma_state_s99_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) schedule_work(&sde->err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) sdma_set_state(sde, sdma_state_s80_hw_freeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) atomic_dec(&sde->dd->sdma_unfreeze_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) case sdma_state_s30_sw_clean_up_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) sdma_start_hw_clean_up(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) case sdma_state_s40_hw_clean_up_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) sdma_hw_start_up(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) sdma_set_state(sde, ss->go_s99_running ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) sdma_state_s99_running :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) sdma_state_s20_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) case sdma_state_s50_hw_halt_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) schedule_work(&sde->err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) case sdma_state_s60_idle_halt_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) schedule_work(&sde->err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) case sdma_state_s80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) case sdma_state_s82_freeze_sw_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) ss->go_s99_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) /* notify caller this engine is done cleaning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) atomic_dec(&sde->dd->sdma_unfreeze_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) sdma_hw_start_up(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) sdma_set_state(sde, ss->go_s99_running ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) sdma_state_s99_running :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) sdma_state_s20_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) case sdma_state_s99_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) case sdma_event_e00_go_hw_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) sdma_set_state(sde, sdma_state_s00_hw_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) case sdma_event_e10_go_hw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) case sdma_event_e15_hw_halt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) case sdma_event_e25_hw_clean_up_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) case sdma_event_e30_go_running:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) case sdma_event_e40_sw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) case sdma_event_e50_hw_cleaned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) case sdma_event_e60_hw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) need_progress = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) sdma_err_progress_check_schedule(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) case sdma_event_e90_sw_halted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) * A SW-initiated halt does not perform the engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) * progress check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) schedule_work(&sde->err_halt_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) case sdma_event_e70_go_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) case sdma_event_e85_link_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) ss->go_s99_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) case sdma_event_e80_hw_freeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) sdma_set_state(sde, sdma_state_s80_hw_freeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) atomic_dec(&sde->dd->sdma_unfreeze_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) case sdma_event_e81_hw_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) case sdma_event_e82_hw_unfreeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) ss->last_event = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (need_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) sdma_make_progress(sde, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) * _extend_sdma_tx_descs() - helper to extend txreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) * This is called once the initial nominal allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) * of descriptors in the sdma_txreq is exhausted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) * The code will bump the allocation up to the max
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) * of MAX_DESC (64) descriptors. There doesn't seem to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) * much point in an interim step. The last descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) * is reserved for the coalesce buffer in order to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) * cases where the input packet has more than MAX_DESC iovecs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) struct sdma_desc *descp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) /* Handle last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (unlikely(tx->num_desc == (MAX_DESC - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) /* if tlen is 0, it is for padding; release the last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) if (!tx->tlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) tx->desc_limit = MAX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) } else if (!tx->coalesce_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) /* allocate coalesce buffer with space for padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (!tx->coalesce_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) goto enomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) tx->coalesce_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (unlikely(tx->num_desc == MAX_DESC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) goto enomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (!descp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) goto enomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) tx->descp = descp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) /* reserve last descriptor for coalescing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) tx->desc_limit = MAX_DESC - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) /* copy ones already built */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) for (i = 0; i < tx->num_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) tx->descp[i] = tx->descs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) enomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) * This is called once the initial nominal allocation of descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) * in the sdma_txreq is exhausted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) * This function calls _extend_sdma_tx_descs to extend the descriptor array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) * or allocate a coalesce buffer. If there is an allocated coalesce buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) * it will copy the input packet data into the coalesce buffer. It also adds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * the coalesce buffer descriptor once the whole packet has been received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) * <0 - error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) * 0 - coalescing, don't populate descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) * 1 - continue with populating descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) int type, void *kvaddr, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) unsigned long offset, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) int pad_len, rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) rval = _extend_sdma_tx_descs(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) /* If coalesce buffer is allocated, copy data into it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) if (tx->coalesce_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) if (type == SDMA_MAP_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) if (type == SDMA_MAP_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) kvaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) kvaddr += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) } else if (WARN_ON(!kvaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) tx->coalesce_idx += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) if (type == SDMA_MAP_PAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) /* If there is more data, return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) if (tx->tlen - tx->coalesce_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) /* Whole packet is received; add any padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) pad_len = tx->packet_len & (sizeof(u32) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) pad_len = sizeof(u32) - pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) /* padding is taken care of for coalescing case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) tx->packet_len += pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) tx->tlen += pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) /* dma map the coalesce buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) addr = dma_map_single(&dd->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) tx->coalesce_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) tx->tlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) /* Add descriptor for coalesce buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) tx->desc_limit = MAX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) addr, tx->tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) /* Update sdes when the lmc changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) u64 sreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) SD(CHECK_SLID_MASK_SHIFT)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) SD(CHECK_SLID_VALUE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) for (i = 0; i < dd->num_sdma; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) i, (u32)sreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) sde = &dd->per_sdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) write_sde_csr(sde, SD(CHECK_SLID), sreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
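
/*
 * Illustrative sketch of the register above (hypothetical values, not
 * taken from the driver): with LMC = 2 a caller would pass mask = ~0x3.
 * For lid = 0x1001 the programmed value becomes 0x1000, so the engine's
 * SLID check - compare (slid & mask) with the programmed value - accepts
 * source LIDs 0x1000 through 0x1003 and rejects everything else.
 */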
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) /* tx not dword sized - pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) tx->num_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	if (unlikely(tx->num_desc == tx->desc_limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) rval = _extend_sdma_tx_descs(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) __sdma_txclean(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) /* finish the one just added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) make_tx_sdma_desc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) SDMA_MAP_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) dd->sdma_pad_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) _sdma_close_tx(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
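
/*
 * Note: the pad descriptor built above points at dd->sdma_pad_phys, which
 * is expected to be a small DMA-mapped scratch buffer set up once at
 * driver init, so dword padding never requires a per-request allocation
 * or mapping.
 */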
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) * Add ahg to the sdma_txreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) * The logic will consume up to 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) * descriptors at the beginning of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) * sdma_txreq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) void _sdma_txreq_ahgadd(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) struct sdma_txreq *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) u8 num_ahg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) u8 ahg_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) u32 *ahg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) u8 ahg_hlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) u32 i, shift = 0, desc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) u8 mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) /* compute mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) if (num_ahg == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) mode = SDMA_AHG_APPLY_UPDATE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) else if (num_ahg <= 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) mode = SDMA_AHG_APPLY_UPDATE2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) mode = SDMA_AHG_APPLY_UPDATE3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) tx->num_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	/* initialize the consumed descriptors to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) case SDMA_AHG_APPLY_UPDATE3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) tx->num_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) tx->descs[2].qw[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) tx->descs[2].qw[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) case SDMA_AHG_APPLY_UPDATE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) tx->num_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) tx->descs[1].qw[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) tx->descs[1].qw[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) ahg_hlen >>= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) tx->descs[0].qw[1] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) << SDMA_DESC1_HEADER_INDEX_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) << SDMA_DESC1_HEADER_DWS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) << SDMA_DESC1_HEADER_MODE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) for (i = 0; i < (num_ahg - 1); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (!shift && !(i & 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) tx->descs[desc].qw[!!(i & 2)] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) (((u64)ahg[i + 1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) shift = (shift + 32) & 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
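
/*
 * Summary of the packing above (descriptive only): num_ahg == 1 selects
 * APPLY_UPDATE1 and the single update word rides in descriptor 0; 2-5
 * updates select APPLY_UPDATE2 and spill into descriptor 1; 6-9 select
 * APPLY_UPDATE3 and also consume descriptor 2.  Each additional 32-bit
 * update word lands in the low or high half of a descriptor qword, which
 * is what the alternating 0/32 "shift" selects.
 */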
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * sdma_ahg_alloc - allocate an AHG entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * @sde: engine to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)  * 0-31 when successful, -EINVAL if no engine is supplied (e.g. AHG is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)  * not enabled), -ENOSPC if an entry is not available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) int sdma_ahg_alloc(struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) int nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) int oldbit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) if (!sde) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) trace_hfi1_ahg_allocate(sde, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) nr = ffz(READ_ONCE(sde->ahg_bits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) if (nr > 31) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) trace_hfi1_ahg_allocate(sde, -ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) oldbit = test_and_set_bit(nr, &sde->ahg_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (!oldbit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) trace_hfi1_ahg_allocate(sde, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) }
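
/*
 * Example usage (hypothetical caller, for illustration only):
 *
 *	int ahg_idx = sdma_ahg_alloc(sde);
 *
 *	if (ahg_idx < 0)
 *		return ahg_idx;		(AHG unavailable or no free entry)
 *
 *	... program AHG entry ahg_idx and build the request ...
 *
 *	sdma_ahg_free(sde, ahg_idx);
 */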
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) * sdma_ahg_free - free an AHG entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) * @sde: engine to return AHG entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) * @ahg_index: index to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)  * This routine frees the indicated AHG entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (!sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) trace_hfi1_ahg_deallocate(sde, ahg_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) if (ahg_index < 0 || ahg_index > 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) clear_bit(ahg_index, &sde->ahg_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) * SPC freeze handling for SDMA engines. Called when the driver knows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) * the SPC is going into a freeze but before the freeze is fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)  * settled.  This is generally triggered by an error interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) * This event will pull the engine out of running so no more entries can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) * added to the engine's queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) enum sdma_events event = link_down ? sdma_event_e85_link_down :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) sdma_event_e80_hw_freeze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) /* set up the wait but do not wait here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) /* tell all engines to stop running and wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) for (i = 0; i < dd->num_sdma; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) sdma_process_event(&dd->per_sdma[i], event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) /* sdma_freeze() will wait for all engines to have stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) * SPC freeze handling for SDMA engines. Called when the driver knows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) * the SPC is fully frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) void sdma_freeze(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) * Make sure all engines have moved out of the running state before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) * continuing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) atomic_read(&dd->sdma_unfreeze_count) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	/* interrupted, or the count went negative (unloading) - just exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) /* set up the count for the next wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	/* tell all engines that the SPC is frozen so they can start cleaning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) for (i = 0; i < dd->num_sdma; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) * Wait for everyone to finish software clean before exiting. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) * software clean will read engine CSRs, so must be completed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) * the next step, which will clear the engine CSRs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) atomic_read(&dd->sdma_unfreeze_count) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) /* no need to check results - done no matter what */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) * The SPC freeze acts like a SDMA halt and a hardware clean combined. All
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) * that is left is a software clean. We could do it after the SPC is fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) * frozen, but then we'd have to add another state to wait for the unfreeze.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) * Instead, just defer the software clean until the unfreeze step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) void sdma_unfreeze(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	/* tell all engines to start freeze cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) for (i = 0; i < dd->num_sdma; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) sdma_process_event(&dd->per_sdma[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) sdma_event_e82_hw_unfreeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
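
/*
 * Sketch of the expected call order during an SPC freeze, based on the
 * comments above (the actual callers live elsewhere in the driver's
 * error-handling code):
 *
 *	sdma_freeze_notify(dd, link_down);	pull engines out of running
 *	sdma_freeze(dd);			wait, then trigger cleanup
 *	... the SPC is unfrozen ...
 *	sdma_unfreeze(dd);			engines resume normal operation
 */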
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) * _sdma_engine_progress_schedule() - schedule progress on engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) * @sde: sdma_engine to schedule progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) void _sdma_engine_progress_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) struct sdma_engine *sde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) /* assume we have selected a good cpu */
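	/*
	 * CCE_INT_FORCE is laid out as one 8-byte CSR per group of 64
	 * interrupt sources, so the offset below selects the force register
	 * that contains the SDMA sources; progress_mask sets only this
	 * engine's interrupt bit.
	 */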
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) write_csr(sde->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) sde->progress_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }