Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright 2016-2020 HabanaLabs, Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #ifndef HABANALABS_H_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #define HABANALABS_H_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/ioctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) 
/*
 * Defines that are asic-specific but constitute an ABI between the kernel
 * driver and userspace
 */

/* Size of SRAM reserved by the driver, counted from the start of SRAM */
#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START		0x8000	/* 32KB */
#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START	0x80	/* 128 bytes */

/* First sync object / monitor indices available for userspace on Gaudi.
 * NOTE(review): lower indices appear to be reserved — confirm against the
 * driver sources.
 */
#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT		48
#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR		24
/*
 * Goya queue Numbering
 *
 * The external queues (PCI DMA channels) MUST be before the internal queues
 * and each group (PCI DMA channels and internal) must be contiguous inside
 * itself but there can be a gap between the two groups (although not
 * recommended)
 */

enum goya_queue_id {
	GOYA_QUEUE_ID_DMA_0 = 0,	/* external queues (PCI DMA) start here */
	GOYA_QUEUE_ID_DMA_1 = 1,
	GOYA_QUEUE_ID_DMA_2 = 2,
	GOYA_QUEUE_ID_DMA_3 = 3,
	GOYA_QUEUE_ID_DMA_4 = 4,
	GOYA_QUEUE_ID_CPU_PQ = 5,	/* CPU queue */
	GOYA_QUEUE_ID_MME = 6,	/* Internal queues start here */
	GOYA_QUEUE_ID_TPC0 = 7,
	GOYA_QUEUE_ID_TPC1 = 8,
	GOYA_QUEUE_ID_TPC2 = 9,
	GOYA_QUEUE_ID_TPC3 = 10,
	GOYA_QUEUE_ID_TPC4 = 11,
	GOYA_QUEUE_ID_TPC5 = 12,
	GOYA_QUEUE_ID_TPC6 = 13,
	GOYA_QUEUE_ID_TPC7 = 14,
	GOYA_QUEUE_ID_SIZE	/* number of queue IDs, not a valid queue */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
/*
 * Gaudi queue Numbering
 * External queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*.
 * Except one CPU queue, all the rest are internal queues.
 */

/* Values are userspace-visible ABI — do not renumber or reorder */
enum gaudi_queue_id {
	GAUDI_QUEUE_ID_DMA_0_0 = 0,	/* external */
	GAUDI_QUEUE_ID_DMA_0_1 = 1,	/* external */
	GAUDI_QUEUE_ID_DMA_0_2 = 2,	/* external */
	GAUDI_QUEUE_ID_DMA_0_3 = 3,	/* external */
	GAUDI_QUEUE_ID_DMA_1_0 = 4,	/* external */
	GAUDI_QUEUE_ID_DMA_1_1 = 5,	/* external */
	GAUDI_QUEUE_ID_DMA_1_2 = 6,	/* external */
	GAUDI_QUEUE_ID_DMA_1_3 = 7,	/* external */
	GAUDI_QUEUE_ID_CPU_PQ = 8,	/* CPU */
	GAUDI_QUEUE_ID_DMA_2_0 = 9,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_1 = 10,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_2 = 11,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_3 = 12,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_0 = 13,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_1 = 14,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_2 = 15,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_3 = 16,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_0 = 17,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_1 = 18,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_2 = 19,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_3 = 20,	/* internal */
	GAUDI_QUEUE_ID_DMA_5_0 = 21,	/* external */
	GAUDI_QUEUE_ID_DMA_5_1 = 22,	/* external */
	GAUDI_QUEUE_ID_DMA_5_2 = 23,	/* external */
	GAUDI_QUEUE_ID_DMA_5_3 = 24,	/* external */
	GAUDI_QUEUE_ID_DMA_6_0 = 25,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_1 = 26,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_2 = 27,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_3 = 28,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_0 = 29,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_1 = 30,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_2 = 31,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_3 = 32,	/* internal */
	GAUDI_QUEUE_ID_MME_0_0 = 33,	/* internal */
	GAUDI_QUEUE_ID_MME_0_1 = 34,	/* internal */
	GAUDI_QUEUE_ID_MME_0_2 = 35,	/* internal */
	GAUDI_QUEUE_ID_MME_0_3 = 36,	/* internal */
	GAUDI_QUEUE_ID_MME_1_0 = 37,	/* internal */
	GAUDI_QUEUE_ID_MME_1_1 = 38,	/* internal */
	GAUDI_QUEUE_ID_MME_1_2 = 39,	/* internal */
	GAUDI_QUEUE_ID_MME_1_3 = 40,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_0 = 41,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_1 = 42,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_2 = 43,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_3 = 44,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_0 = 45,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_1 = 46,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_2 = 47,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_3 = 48,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_0 = 49,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_1 = 50,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_2 = 51,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_3 = 52,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_0 = 53,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_1 = 54,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_2 = 55,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_3 = 56,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_0 = 57,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_1 = 58,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_2 = 59,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_3 = 60,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_0 = 61,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_1 = 62,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_2 = 63,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_3 = 64,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_0 = 65,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_1 = 66,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_2 = 67,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_3 = 68,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_0 = 69,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_1 = 70,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_2 = 71,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_3 = 72,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_0 = 73,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_1 = 74,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_2 = 75,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_3 = 76,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_0 = 77,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_1 = 78,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_2 = 79,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_3 = 80,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_0 = 81,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_1 = 82,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_2 = 83,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_3 = 84,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_0 = 85,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_1 = 86,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_2 = 87,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_3 = 88,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_0 = 89,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_1 = 90,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_2 = 91,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_3 = 92,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_0 = 93,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_1 = 94,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_2 = 95,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_3 = 96,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_0 = 97,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_1 = 98,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_2 = 99,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_3 = 100,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_0 = 101,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_1 = 102,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_2 = 103,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_3 = 104,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_0 = 105,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_1 = 106,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_2 = 107,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_3 = 108,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_0 = 109,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_1 = 110,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_2 = 111,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_3 = 112,	/* internal */
	GAUDI_QUEUE_ID_SIZE	/* number of queue IDs, not a valid queue */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
/*
 * Engine Numbering
 *
 * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
 */

/* Each value is the bit index of that engine in busy_engines_mask */
enum goya_engine_id {
	GOYA_ENGINE_ID_DMA_0 = 0,
	GOYA_ENGINE_ID_DMA_1,
	GOYA_ENGINE_ID_DMA_2,
	GOYA_ENGINE_ID_DMA_3,
	GOYA_ENGINE_ID_DMA_4,
	GOYA_ENGINE_ID_MME_0,
	GOYA_ENGINE_ID_TPC_0,
	GOYA_ENGINE_ID_TPC_1,
	GOYA_ENGINE_ID_TPC_2,
	GOYA_ENGINE_ID_TPC_3,
	GOYA_ENGINE_ID_TPC_4,
	GOYA_ENGINE_ID_TPC_5,
	GOYA_ENGINE_ID_TPC_6,
	GOYA_ENGINE_ID_TPC_7,
	GOYA_ENGINE_ID_SIZE	/* number of engines, not a valid engine ID */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
/* Each value is the bit index of that engine in busy_engines_mask /
 * busy_engines_mask_ext (see `struct hl_info_hw_idle')
 */
enum gaudi_engine_id {
	GAUDI_ENGINE_ID_DMA_0 = 0,
	GAUDI_ENGINE_ID_DMA_1,
	GAUDI_ENGINE_ID_DMA_2,
	GAUDI_ENGINE_ID_DMA_3,
	GAUDI_ENGINE_ID_DMA_4,
	GAUDI_ENGINE_ID_DMA_5,
	GAUDI_ENGINE_ID_DMA_6,
	GAUDI_ENGINE_ID_DMA_7,
	GAUDI_ENGINE_ID_MME_0,
	GAUDI_ENGINE_ID_MME_1,
	GAUDI_ENGINE_ID_MME_2,
	GAUDI_ENGINE_ID_MME_3,
	GAUDI_ENGINE_ID_TPC_0,
	GAUDI_ENGINE_ID_TPC_1,
	GAUDI_ENGINE_ID_TPC_2,
	GAUDI_ENGINE_ID_TPC_3,
	GAUDI_ENGINE_ID_TPC_4,
	GAUDI_ENGINE_ID_TPC_5,
	GAUDI_ENGINE_ID_TPC_6,
	GAUDI_ENGINE_ID_TPC_7,
	GAUDI_ENGINE_ID_NIC_0,
	GAUDI_ENGINE_ID_NIC_1,
	GAUDI_ENGINE_ID_NIC_2,
	GAUDI_ENGINE_ID_NIC_3,
	GAUDI_ENGINE_ID_NIC_4,
	GAUDI_ENGINE_ID_NIC_5,
	GAUDI_ENGINE_ID_NIC_6,
	GAUDI_ENGINE_ID_NIC_7,
	GAUDI_ENGINE_ID_NIC_8,
	GAUDI_ENGINE_ID_NIC_9,
	GAUDI_ENGINE_ID_SIZE	/* number of engines, not a valid engine ID */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
/* Device status values; see the HL_INFO_DEVICE_STATUS opcode and
 * `struct hl_info_device_status' below
 */
enum hl_device_status {
	HL_DEVICE_STATUS_OPERATIONAL,
	HL_DEVICE_STATUS_IN_RESET,
	HL_DEVICE_STATUS_MALFUNCTION
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
/* Opcode for management ioctl
 *
 * HL_INFO_HW_IP_INFO    - Receive information about different IP blocks in the
 *                         device.
 * HL_INFO_HW_EVENTS     - Receive an array describing how many times each event
 *                         occurred since the last hard reset.
 * HL_INFO_DRAM_USAGE    - Retrieve the dram usage inside the device and of the
 *                         specific context. This is relevant only for devices
 *                         where the dram is managed by the kernel driver
 * HL_INFO_HW_IDLE       - Retrieve information about the idle status of each
 *                         internal engine.
 * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
 *                         require an open context.
 * HL_INFO_DEVICE_UTILIZATION  - Retrieve the total utilization of the device
 *                               over the last period specified by the user.
 *                               The period can be between 100ms to 1s, in
 *                               resolution of 100ms. The return value is a
 *                               percentage of the utilization rate.
 * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
 *                               event occurred since the driver was loaded.
 * HL_INFO_CLK_RATE            - Retrieve the current and maximum clock rate
 *                               of the device in MHz. The maximum clock rate is
 *                               configurable via sysfs parameter
 * HL_INFO_RESET_COUNT   - Retrieve the counts of the soft and hard reset
 *                         operations performed on the device since the last
 *                         time the driver was loaded.
 * HL_INFO_TIME_SYNC     - Retrieve the device's time alongside the host's time
 *                         for synchronization.
 * HL_INFO_CS_COUNTERS   - Retrieve command submission counters
 * HL_INFO_PCI_COUNTERS  - Retrieve PCI counters
 * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
 * HL_INFO_SYNC_MANAGER  - Retrieve sync manager info per dcore
 * HL_INFO_TOTAL_ENERGY  - Retrieve total energy consumption
 */
#define HL_INFO_HW_IP_INFO		0
#define HL_INFO_HW_EVENTS		1
#define HL_INFO_DRAM_USAGE		2
#define HL_INFO_HW_IDLE			3
#define HL_INFO_DEVICE_STATUS		4
/* NOTE(review): opcode 5 is unused — presumably retired/reserved; keep the
 * numbering gap, these values are ABI.
 */
#define HL_INFO_DEVICE_UTILIZATION	6
#define HL_INFO_HW_EVENTS_AGGREGATE	7
#define HL_INFO_CLK_RATE		8
#define HL_INFO_RESET_COUNT		9
#define HL_INFO_TIME_SYNC		10
#define HL_INFO_CS_COUNTERS		11
#define HL_INFO_PCI_COUNTERS		12
#define HL_INFO_CLK_THROTTLE_REASON	13
#define HL_INFO_SYNC_MANAGER		14
#define HL_INFO_TOTAL_ENERGY		15

/* Buffer sizes for the version/name strings in struct hl_info_hw_ip_info */
#define HL_INFO_VERSION_MAX_LEN	128
#define HL_INFO_CARD_NAME_MAX_LEN	16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
/* Reply for the HL_INFO_HW_IP_INFO opcode */
struct hl_info_hw_ip_info {
	__u64 sram_base_address;
	__u64 dram_base_address;
	__u64 dram_size;
	__u32 sram_size;
	__u32 num_of_events;
	__u32 device_id; /* PCI Device ID */
	__u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */
	__u32 reserved[2];
	__u32 cpld_version;
	__u32 psoc_pci_pll_nr;
	__u32 psoc_pci_pll_nf;
	__u32 psoc_pci_pll_od;
	__u32 psoc_pci_pll_div_factor;
	__u8 tpc_enabled_mask;
	__u8 dram_enabled;
	__u8 pad[2];
	__u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
	__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
/* Reply for the HL_INFO_DRAM_USAGE opcode */
struct hl_info_dram_usage {
	__u64 dram_free_mem;	/* free DRAM on the device */
	__u64 ctx_dram_mem;	/* DRAM used by the specific context */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
/* Reply for the HL_INFO_HW_IDLE opcode */
struct hl_info_hw_idle {
	__u32 is_idle;
	/*
	 * Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u32 busy_engines_mask;

	/*
	 * Extended Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u64 busy_engines_mask_ext;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
/* Reply for the HL_INFO_DEVICE_STATUS opcode */
struct hl_info_device_status {
	__u32 status;	/* presumably one of enum hl_device_status — confirm in driver */
	__u32 pad;	/* explicit padding to an 8-byte size */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
/* Reply for the HL_INFO_DEVICE_UTILIZATION opcode */
struct hl_info_device_utilization {
	__u32 utilization;	/* utilization rate, as a percentage */
	__u32 pad;	/* explicit padding to an 8-byte size */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
/* Reply for the HL_INFO_CLK_RATE opcode */
struct hl_info_clk_rate {
	__u32 cur_clk_rate_mhz;	/* current clock rate, in MHz */
	__u32 max_clk_rate_mhz;	/* maximum clock rate, in MHz */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
/* Reply for the HL_INFO_RESET_COUNT opcode */
struct hl_info_reset_count {
	__u32 hard_reset_cnt;	/* hard resets since the driver was loaded */
	__u32 soft_reset_cnt;	/* soft resets since the driver was loaded */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
/* Reply for the HL_INFO_TIME_SYNC opcode */
struct hl_info_time_sync {
	__u64 device_time;
	__u64 host_time;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
/**
 * struct hl_info_pci_counters - pci counters
 *                               (reply for the HL_INFO_PCI_COUNTERS opcode)
 * @rx_throughput: PCI rx throughput KBps
 * @tx_throughput: PCI tx throughput KBps
 * @replay_cnt: PCI replay counter
 */
struct hl_info_pci_counters {
	__u64 rx_throughput;
	__u64 tx_throughput;
	__u64 replay_cnt;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
/* Bit values for hl_info_clk_throttle.clk_throttling_reason */
#define HL_CLK_THROTTLE_POWER	0x1
#define HL_CLK_THROTTLE_THERMAL	0x2

/**
 * struct hl_info_clk_throttle - clock throttling reason
 *                               (reply for HL_INFO_CLK_THROTTLE_REASON)
 * @clk_throttling_reason: each bit represents a clk throttling reason
 *                         (HL_CLK_THROTTLE_* above)
 */
struct hl_info_clk_throttle {
	__u32 clk_throttling_reason;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
/**
 * struct hl_info_energy - device energy information
 *                         (reply for the HL_INFO_TOTAL_ENERGY opcode)
 * @total_energy_consumption: total device energy consumption
 */
struct hl_info_energy {
	__u64 total_energy_consumption;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
/**
 * struct hl_info_sync_manager - sync manager information, per dcore
 *                               (reply for the HL_INFO_SYNC_MANAGER opcode)
 * @first_available_sync_object: first available sob
 * @first_available_monitor: first available monitor
 */
struct hl_info_sync_manager {
	__u32 first_available_sync_object;
	__u32 first_available_monitor;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
/**
 * struct hl_cs_counters - command submission counters
 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
 * @parsing_drop_cnt: dropped due to error in packet parsing
 * @queue_full_drop_cnt: dropped due to queue full
 * @device_in_reset_drop_cnt: dropped due to device in reset
 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
 */
struct hl_cs_counters {
	__u64 out_of_mem_drop_cnt;
	__u64 parsing_drop_cnt;
	__u64 queue_full_drop_cnt;
	__u64 device_in_reset_drop_cnt;
	__u64 max_cs_in_flight_drop_cnt;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) struct hl_info_cs_counters {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	struct hl_cs_counters cs_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	struct hl_cs_counters ctx_cs_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) enum gaudi_dcores {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	HL_GAUDI_WS_DCORE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	HL_GAUDI_WN_DCORE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	HL_GAUDI_EN_DCORE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	HL_GAUDI_ES_DCORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) struct hl_info_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	/* Location of relevant struct in userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	__u64 return_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	 * The size of the return value. Just like "size" in "snprintf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	 * it limits how many bytes the kernel can write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	 * For hw_events array, the size should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	 * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	__u32 return_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	/* HL_INFO_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	__u32 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		/* Dcore id for which the information is relevant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		 * For Gaudi refer to 'enum gaudi_dcores'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		__u32 dcore_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		/* Context ID - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		__u32 ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		/* Period value for utilization rate (100ms - 1000ms, in 100ms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		 * resolution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		__u32 period_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) /* Opcode to create a new command buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) #define HL_CB_OP_CREATE		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) /* Opcode to destroy previously created command buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) #define HL_CB_OP_DESTROY	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) /* 2MB minus 32 bytes for 2xMSG_PROT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) #define HL_MAX_CB_SIZE		(0x200000 - 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) /* Indicates whether the command buffer should be mapped to the device's MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) #define HL_CB_FLAGS_MAP		0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) struct hl_cb_in {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	/* Handle of CB or 0 if we want to create one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	__u64 cb_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	/* HL_CB_OP_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	__u32 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	/* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	 * will be allocated, regardless of this parameter's value, is PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	__u32 cb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	/* Context ID - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	__u32 ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	/* HL_CB_FLAGS_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	__u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) struct hl_cb_out {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	/* Handle of CB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	__u64 cb_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) union hl_cb_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	struct hl_cb_in in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	struct hl_cb_out out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494)  * This structure size must always be fixed to 64-bytes for backward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  * compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) struct hl_cs_chunk {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		/* For external queue, this represents a Handle of CB on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		 * Host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		 * For internal queue in Goya, this represents an SRAM or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		 * a DRAM address of the internal CB. In Gaudi, this might also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		 * represent a mapped host address of the CB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		 * A mapped host address is in the device address space, after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		 * a host address was mapped by the device MMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		__u64 cb_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		/* Relevant only when HL_CS_FLAGS_WAIT is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		 * This holds address of array of u64 values that contain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		 * signal CS sequence numbers. The wait described by this job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		 * will listen on all those signals (wait event per signal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		__u64 signal_seq_arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	/* Index of queue to put the CB on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	__u32 queue_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		 * Size of command buffer with valid packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		 * Can be smaller then actual CB size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		__u32 cb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		/* Relevant only when HL_CS_FLAGS_WAIT is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		 * Number of entries in signal_seq_arr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		__u32 num_signal_seq_arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	/* HL_CS_CHUNK_FLAGS_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	__u32 cs_chunk_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	/* Align structure to 64 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	__u32 pad[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) /* SIGNAL and WAIT flags are mutually exclusive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) #define HL_CS_FLAGS_FORCE_RESTORE	0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) #define HL_CS_FLAGS_SIGNAL		0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) #define HL_CS_FLAGS_WAIT		0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) #define HL_CS_STATUS_SUCCESS		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) #define HL_MAX_JOBS_PER_CS		512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) struct hl_cs_in {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	/* this holds address of array of hl_cs_chunk for restore phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	__u64 chunks_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	/* holds address of array of hl_cs_chunk for execution phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	__u64 chunks_execute;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	/* this holds address of array of hl_cs_chunk for store phase -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	 * Currently not in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	__u64 chunks_store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	/* Number of chunks in restore phase array. Maximum number is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	 * HL_MAX_JOBS_PER_CS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	__u32 num_chunks_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	/* Number of chunks in execution array. Maximum number is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	 * HL_MAX_JOBS_PER_CS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	__u32 num_chunks_execute;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	/* Number of chunks in restore phase array - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	__u32 num_chunks_store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	/* HL_CS_FLAGS_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	__u32 cs_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	/* Context ID - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	__u32 ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) struct hl_cs_out {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	 * seq holds the sequence number of the CS to pass to wait ioctl. All
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 * values are valid except for 0 and ULLONG_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	__u64 seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	/* HL_CS_STATUS_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	__u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) union hl_cs_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	struct hl_cs_in in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	struct hl_cs_out out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) struct hl_wait_cs_in {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	/* Command submission sequence number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	__u64 seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	/* Absolute timeout to wait in microseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	__u64 timeout_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	/* Context ID - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	__u32 ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) #define HL_WAIT_CS_STATUS_COMPLETED	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) #define HL_WAIT_CS_STATUS_BUSY		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) #define HL_WAIT_CS_STATUS_TIMEDOUT	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) #define HL_WAIT_CS_STATUS_ABORTED	3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) #define HL_WAIT_CS_STATUS_INTERRUPTED	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) struct hl_wait_cs_out {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	/* HL_WAIT_CS_STATUS_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	__u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) union hl_wait_cs_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	struct hl_wait_cs_in in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	struct hl_wait_cs_out out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) /* Opcode to allocate device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) #define HL_MEM_OP_ALLOC			0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) /* Opcode to free previously allocated device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) #define HL_MEM_OP_FREE			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) /* Opcode to map host and device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) #define HL_MEM_OP_MAP			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) /* Opcode to unmap previously mapped host and device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) #define HL_MEM_OP_UNMAP			3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) /* Memory flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) #define HL_MEM_CONTIGUOUS	0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) #define HL_MEM_SHARED		0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) #define HL_MEM_USERPTR		0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) struct hl_mem_in {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		/* HL_MEM_OP_ALLOC- allocate device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			/* Size to alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			__u64 mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		} alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		/* HL_MEM_OP_FREE - free device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 			/* Handle returned from HL_MEM_OP_ALLOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			__u64 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		} free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		/* HL_MEM_OP_MAP - map device memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			 * Requested virtual address of mapped memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			 * The driver will try to map the requested region to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			 * this hint address, as long as the address is valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			 * and not already mapped. The user should check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 			 * returned address of the IOCTL to make sure he got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			 * the hint address. Passing 0 here means that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			 * driver will choose the address itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			__u64 hint_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			/* Handle returned from HL_MEM_OP_ALLOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			__u64 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		} map_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		/* HL_MEM_OP_MAP - map host memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 			/* Address of allocated host memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 			__u64 host_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 			 * Requested virtual address of mapped memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			 * The driver will try to map the requested region to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			 * this hint address, as long as the address is valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			 * and not already mapped. The user should check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			 * returned address of the IOCTL to make sure he got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			 * the hint address. Passing 0 here means that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			 * driver will choose the address itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			__u64 hint_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			/* Size of allocated host memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			__u64 mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		} map_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		/* HL_MEM_OP_UNMAP - unmap host memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			/* Virtual address returned from HL_MEM_OP_MAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			__u64 device_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		} unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	/* HL_MEM_OP_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	__u32 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* HL_MEM_* flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	__u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	/* Context ID - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	__u32 ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) struct hl_mem_out {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		 * Used for HL_MEM_OP_MAP as the virtual address that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		 * assigned in the device VA space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		 * A value of 0 means the requested operation failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		__u64 device_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		 * Used for HL_MEM_OP_ALLOC. This is the assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		 * handle for the allocated memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		__u64 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) union hl_mem_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	struct hl_mem_in in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	struct hl_mem_out out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) #define HL_DEBUG_MAX_AUX_VALUES		10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) struct hl_debug_params_etr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	/* Address in memory to allocate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	__u64 buffer_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	/* Size of buffer to allocate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	__u64 buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	__u32 sink_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) struct hl_debug_params_etf {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	/* Address in memory to allocate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	__u64 buffer_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	/* Size of buffer to allocate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	__u64 buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	__u32 sink_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) struct hl_debug_params_stm {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	/* Two bit masks for HW event and Stimulus Port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	__u64 he_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	__u64 sp_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	/* Trace source ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	__u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/* Frequency for the timestamp register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	__u32 frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) struct hl_debug_params_bmon {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	/* Two address ranges that the user can request to filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	__u64 start_addr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	__u64 addr_mask0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	__u64 start_addr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	__u64 addr_mask1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	/* Capture window configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	__u32 bw_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	__u32 win_capture;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	/* Trace source ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	__u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) struct hl_debug_params_spmu {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	/* Event types selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	__u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	/* Number of event types selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	__u32 event_types_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	__u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) /* Opcode for ETR component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) #define HL_DEBUG_OP_ETR		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) /* Opcode for ETF component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) #define HL_DEBUG_OP_ETF		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) /* Opcode for STM component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) #define HL_DEBUG_OP_STM		2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) /* Opcode for FUNNEL component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) #define HL_DEBUG_OP_FUNNEL	3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) /* Opcode for BMON component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) #define HL_DEBUG_OP_BMON	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) /* Opcode for SPMU component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) #define HL_DEBUG_OP_SPMU	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) /* Opcode for timestamp (deprecated) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) #define HL_DEBUG_OP_TIMESTAMP	6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) /* Opcode for setting the device into or out of debug mode. The enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * variable should be 1 for enabling debug mode and 0 for disabling it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) #define HL_DEBUG_OP_SET_MODE	7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) struct hl_debug_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	 * Pointer to user input structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	 * This field is relevant to specific opcodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	__u64 input_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	/* Pointer to user output structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	__u64 output_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	/* Size of user input structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	__u32 input_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	/* Size of user output structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	__u32 output_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* HL_DEBUG_OP_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	__u32 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	 * Register index in the component, taken from the debug_regs_index enum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	 * in the various ASIC header files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	__u32 reg_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	/* Enable/disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	__u32 enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	/* Context ID - Currently not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	__u32 ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * Various information operations such as:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * - H/W IP information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * - Current dram usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * The user calls this IOCTL with an opcode that describes the required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * information. The user should supply a pointer to a user-allocated memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  * chunk, which will be filled by the driver with the requested information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  * The user supplies the maximum amount of size to copy into the user's memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * in order to prevent data corruption in case of differences between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  * definitions of structures in kernel and userspace, e.g. in case of old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846)  * userspace and new kernel driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) #define HL_IOCTL_INFO	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		_IOWR('H', 0x01, struct hl_info_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  * Command Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * - Request a Command Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  * - Destroy a Command Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * The command buffers are memory blocks that reside in DMA-able address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * space and are physically contiguous so they can be accessed by the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * directly. They are allocated using the coherent DMA API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * When creating a new CB, the IOCTL returns a handle of it, and the user-space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * process needs to use that handle to mmap the buffer so it can access them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * In some instances, the device must access the command buffer through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * device's MMU, and thus its memory should be mapped. In these cases, user can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  * indicate the driver that such a mapping is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * The resulting device virtual address will be used internally by the driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * and won't be returned to user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) #define HL_IOCTL_CB		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		_IOWR('H', 0x02, union hl_cb_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  * Command Submission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  * To submit work to the device, the user need to call this IOCTL with a set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * of JOBS. That set of JOBS constitutes a CS object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  * Each JOB will be enqueued on a specific queue, according to the user's input.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  * There can be more then one JOB per queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  * a second set is for "execution" phase and a third set is for "store" phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  * The JOBS on the "restore" phase are enqueued only after context-switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  * (or if its the first CS for this context). The user can also order the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  * driver to run the "restore" phase explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  * There are two types of queues - external and internal. External queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  * are DMA queues which transfer data from/to the Host. All other queues are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  * internal. The driver will get completion notifications from the device only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  * on JOBS which are enqueued in the external queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * For jobs on external queues, the user needs to create command buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * internal queues, the user needs to prepare a "command buffer" with packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * on either the device SRAM/DRAM or the host, and give the device address of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * that buffer to the CS ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * This IOCTL is asynchronous in regard to the actual execution of the CS. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * means it returns immediately after ALL the JOBS were enqueued on their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * relevant queues. Therefore, the user mustn't assume the CS has been completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * or has even started to execute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  * Upon successful enqueue, the IOCTL returns a sequence number which the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * can use with the "Wait for CS" IOCTL to check whether the handle's CS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * external JOBS have been completed. Note that if the CS has internal JOBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * which can execute AFTER the external JOBS have finished, the driver might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * report that the CS has finished executing BEFORE the internal JOBS have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * actually finished executing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * Even though the sequence number increments per CS, the user can NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * automatically assume that if CS with sequence number N finished, then CS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  * with sequence number N-1 also finished. The user can make this assumption if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  * and only if CS N and CS N-1 are exactly the same (same CBs for the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  * queues).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) #define HL_IOCTL_CS			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		_IOWR('H', 0x03, union hl_cs_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * Wait for Command Submission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * The user can call this IOCTL with a handle it received from the CS IOCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * to wait until the handle's CS has finished executing. The user will wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * inside the kernel until the CS has finished or until the user-requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  * timeout has expired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * If the timeout value is 0, the driver won't sleep at all. It will check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * the status of the CS and return immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * The return value of the IOCTL is a standard Linux error code. The possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * values are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * EINTR     - Kernel waiting has been interrupted, e.g. due to OS signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *             that the user process received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * ETIMEDOUT - The CS has caused a timeout on the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * EIO       - The CS was aborted (usually because the device was reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * ENODEV    - The device wants to do hard-reset (so user need to close FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * The driver also returns a custom define inside the IOCTL which can be:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  * HL_WAIT_CS_STATUS_TIMEDOUT    - The CS has caused a timeout on the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  *                                 (ETIMEDOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  *                                 device was reset (EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #define HL_IOCTL_WAIT_CS			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		_IOWR('H', 0x04, union hl_wait_cs_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  * Memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  * - Map host memory to device MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  * - Unmap host memory from device MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  * This IOCTL allows the user to map host memory to the device MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * For host memory, the IOCTL doesn't allocate memory. The user is supposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * to allocate the memory in user-space (malloc/new). The driver pins the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * physical pages (up to the allowed limit by the OS), assigns a virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * address in the device VA space and initializes the device MMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * There is an option for the user to specify the requested virtual address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) #define HL_IOCTL_MEMORY		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		_IOWR('H', 0x05, union hl_mem_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  * Debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * This IOCTL allows the user to get debug traces from the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  * Before the user can send configuration requests of the various
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * debug/profile engines, it needs to set the device into debug mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * This is because the debug/profile infrastructure is shared component in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  * device and we can't allow multiple users to access it at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * Once a user set the device into debug mode, the driver won't allow other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  * users to "work" with the device, i.e. open a FD. If there are multiple users
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  * opened on the device, the driver won't allow any user to debug the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  * For each configuration request, the user needs to provide the register index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  * and essential data such as buffer address and size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  * Once the user has finished using the debug/profile engines, he should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * set the device into non-debug mode, i.e. disable debug mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * The driver can decide to "kick out" the user if he abuses this interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) #define HL_IOCTL_DEBUG		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		_IOWR('H', 0x06, struct hl_debug_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) #define HL_COMMAND_START	0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) #define HL_COMMAND_END		0x07
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) #endif /* HABANALABS_H_ */