Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Here are things that need to be done:
 *	- surface allocator & initializer: (a bit like scratch regs) should
 *	  initialize HDP_ stuff on RS600, R600, R700 hw, well, anything
 *	  related to surfaces
 *	- WB: write back stuff (do it a bit like the scratch reg things)
 *	- Vblank: look at Jesse's rework and what we should do
 *	- r600/r700: gart & cp
 *	- cs: clean up the cs ioctl, use bitmaps & things like that.
 *	- power management stuff
 *	- barrier in gart code
 *	- unmappable vram?
 *	- TESTING, TESTING, TESTING
 */

/* Initialization path:
 *  We expect that acceleration initialization might fail for various
 *  reasons even though we work hard to make it work on most
 *  configurations. In order to still have a working userspace in such
 *  a situation, the init path must succeed up to the memory controller
 *  initialization point. Failures before this point are considered
 *  fatal errors. Here is the init call chain:
 *      radeon_device_init  performs common structure and mutex initialization
 *      asic_init           sets up the GPU memory layout and performs all
 *                          one-time initialization (failures in this
 *                          function are considered fatal)
 *      asic_startup        sets up GPU acceleration; to follow the
 *                          guideline, the first thing this function
 *                          should do is set up the GPU memory
 *                          controller (only MC setup failures are
 *                          considered fatal)
 */
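
/* Illustrative sketch of the failure semantics above (an assumption for
 * clarity, not code from this driver; asic_init and asic_startup stand
 * in for the per-ASIC callbacks):
 *
 *	r = radeon_device_init(rdev, ...);
 *	if (r)
 *		return r;		fatal, no usable device
 *	r = asic_init(rdev);
 *	if (r)
 *		return r;		fatal, one-time init failed
 *	r = asic_startup(rdev);		the MC is programmed first; past
 *					that point an error only disables
 *					acceleration and userspace still
 *					works
 */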

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drm_gem.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;
extern int radeon_runtime_pm;
extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
extern int radeon_use_pflipirq;
extern int radeon_bapm;
extern int radeon_backlight;
extern int radeon_auxch;
extern int radeon_mst;
extern int radeon_uvd;
extern int radeon_vce;
extern int radeon_si_support;
extern int radeon_cik_support;

/*
 * Copy from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
#define RADEON_USEC_IB_TEST_TIMEOUT		1000000 /* 1s */
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE			16
#define RADEON_DEBUGFS_MAX_COMPONENTS		32
#define RADEONFB_CONN_LIMIT			4
#define RADEON_BIOS_NUM_SCRATCH			8

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX		0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX		1
#define CAYMAN_RING_TYPE_CP2_INDEX		2

/* R600+ has an async dma ring */
#define R600_RING_TYPE_DMA_INDEX		3
/* cayman adds a second async dma ring */
#define CAYMAN_RING_TYPE_DMA1_INDEX		4

/* R600+ */
#define R600_RING_TYPE_UVD_INDEX		5

/* TN+ */
#define TN_RING_TYPE_VCE1_INDEX			6
#define TN_RING_TYPE_VCE2_INDEX			7

/* max number of rings */
#define RADEON_NUM_RINGS			8

/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS			4

/* hardcode these limits for now */
#define RADEON_VA_IB_OFFSET			(1 << 20)
#define RADEON_VA_RESERVED_SIZE			(8 << 20)
#define RADEON_IB_VM_MAX_SIZE			(64 << 10)

/* hard reset data */
#define RADEON_ASIC_RESET_DATA                  0x39d5e86b

/* reset flags */
#define RADEON_RESET_GFX			(1 << 0)
#define RADEON_RESET_COMPUTE			(1 << 1)
#define RADEON_RESET_DMA			(1 << 2)
#define RADEON_RESET_CP				(1 << 3)
#define RADEON_RESET_GRBM			(1 << 4)
#define RADEON_RESET_DMA1			(1 << 5)
#define RADEON_RESET_RLC			(1 << 6)
#define RADEON_RESET_SEM			(1 << 7)
#define RADEON_RESET_IH				(1 << 8)
#define RADEON_RESET_VMC			(1 << 9)
#define RADEON_RESET_MC				(1 << 10)
#define RADEON_RESET_DISPLAY			(1 << 11)

/* CG block flags */
#define RADEON_CG_BLOCK_GFX			(1 << 0)
#define RADEON_CG_BLOCK_MC			(1 << 1)
#define RADEON_CG_BLOCK_SDMA			(1 << 2)
#define RADEON_CG_BLOCK_UVD			(1 << 3)
#define RADEON_CG_BLOCK_VCE			(1 << 4)
#define RADEON_CG_BLOCK_HDP			(1 << 5)
#define RADEON_CG_BLOCK_BIF			(1 << 6)

/* CG flags */
#define RADEON_CG_SUPPORT_GFX_MGCG		(1 << 0)
#define RADEON_CG_SUPPORT_GFX_MGLS		(1 << 1)
#define RADEON_CG_SUPPORT_GFX_CGCG		(1 << 2)
#define RADEON_CG_SUPPORT_GFX_CGLS		(1 << 3)
#define RADEON_CG_SUPPORT_GFX_CGTS		(1 << 4)
#define RADEON_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
#define RADEON_CG_SUPPORT_GFX_CP_LS		(1 << 6)
#define RADEON_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
#define RADEON_CG_SUPPORT_MC_LS			(1 << 8)
#define RADEON_CG_SUPPORT_MC_MGCG		(1 << 9)
#define RADEON_CG_SUPPORT_SDMA_LS		(1 << 10)
#define RADEON_CG_SUPPORT_SDMA_MGCG		(1 << 11)
#define RADEON_CG_SUPPORT_BIF_LS		(1 << 12)
#define RADEON_CG_SUPPORT_UVD_MGCG		(1 << 13)
#define RADEON_CG_SUPPORT_VCE_MGCG		(1 << 14)
#define RADEON_CG_SUPPORT_HDP_LS		(1 << 15)
#define RADEON_CG_SUPPORT_HDP_MGCG		(1 << 16)

/* PG flags */
#define RADEON_PG_SUPPORT_GFX_PG		(1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG		(1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG		(1 << 2)
#define RADEON_PG_SUPPORT_UVD			(1 << 3)
#define RADEON_PG_SUPPORT_VCE			(1 << 4)
#define RADEON_PG_SUPPORT_CP			(1 << 5)
#define RADEON_PG_SUPPORT_GDS			(1 << 6)
#define RADEON_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
#define RADEON_PG_SUPPORT_SDMA			(1 << 8)
#define RADEON_PG_SUPPORT_ACP			(1 << 9)
#define RADEON_PG_SUPPORT_SAMU			(1 << 10)

/* max cursor sizes (in pixels) */
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG             = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
	CHIP_ERRATA_PLL_DELAY           = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
bool radeon_get_bios(struct radeon_device *rdev);

/*
 * Dummy page
 */
struct radeon_dummy_page {
	uint64_t	entry;
	struct page	*page;
	dma_addr_t	addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);


/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll dcpll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
	uint32_t vco_freq;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);
int radeon_pm_late_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
				   u8 clock_type,
				   u32 clock,
				   bool strobe_mode,
				   struct atom_clock_dividers *dividers);
int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
					u32 clock,
					bool strobe_mode,
					struct atom_mpll_param *mpll_param);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
					  u16 voltage_level, u8 voltage_type,
					  u32 *gpio_value, u32 *gpio_mask);
void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
					 u32 eng_clock, u32 mem_clock);
int radeon_atom_get_voltage_step(struct radeon_device *rdev,
				 u8 voltage_type, u16 *voltage_step);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
			     u16 voltage_id, u16 *voltage);
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
						      u16 *voltage,
						      u16 leakage_idx);
int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
					  u16 *leakage_id);
int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
							 u16 *vddc, u16 *vddci,
							 u16 virtual_voltage_id,
							 u16 vbios_voltage_id);
int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
				u16 virtual_voltage_id,
				u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
				      u8 voltage_type,
				      u16 nominal_voltage,
				      u16 *true_voltage);
int radeon_atom_get_min_voltage(struct radeon_device *rdev,
				u8 voltage_type, u16 *min_voltage);
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
				u8 voltage_type, u16 *max_voltage);
int radeon_atom_get_voltage_table(struct radeon_device *rdev,
				  u8 voltage_type, u8 voltage_mode,
				  struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
				 u8 voltage_type, u8 voltage_mode);
int radeon_atom_get_svi2_info(struct radeon_device *rdev,
			      u8 voltage_type,
			      u8 *svd_gpio_id, u8 *svc_gpio_id);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
				   u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
			       u32 mem_clock);
int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
				  u8 module_index,
				  struct atom_mc_reg_table *reg_table);
int radeon_atom_get_memory_info(struct radeon_device *rdev,
				u8 module_index, struct atom_memory_info *mem_info);
int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
				     bool gddr5, u8 module_index,
				     struct atom_memory_clock_range_table *mclk_range_table);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
			     u16 voltage_id, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
				    unsigned *bankh, unsigned *mtaspect,
				    unsigned *tile_split);

/*
 * Fences.
 */
struct radeon_fence_driver {
	struct radeon_device		*rdev;
	uint32_t			scratch_reg;
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t			sync_seq[RADEON_NUM_RINGS];
	atomic64_t			last_seq;
	bool				initialized, delayed_irq;
	struct delayed_work		lockup_work;
};

struct radeon_fence {
	struct dma_fence		base;

	struct radeon_device	*rdev;
	uint64_t		seq;
	/* RB, DMA, etc. */
	unsigned		ring;
	bool			is_vm_update;

	wait_queue_entry_t		fence_wake;
};

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
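
/*
 * Illustrative sketch (an assumption, not part of the driver): the
 * typical emit/wait/unref fence lifecycle using the API above, for a
 * caller that already holds whatever ring locking radeon_fence_emit
 * requires.
 */
static inline int radeon_fence_example_roundtrip(struct radeon_device *rdev,
						 int ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_emit(rdev, &fence, ring);	/* emit on @ring */
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);	/* block until it signals */
	radeon_fence_unref(&fence);		/* drop our reference */
	return r;
}
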
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
						      struct radeon_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
					   struct radeon_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 */
struct radeon_mman {
	struct ttm_bo_device		bdev;
	bool				initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*vram;
	struct dentry			*gtt;
#endif
};

struct radeon_bo_list {
	struct radeon_bo		*robj;
	struct ttm_validate_buffer	tv;
	uint64_t			gpu_offset;
	unsigned			preferred_domains;
	unsigned			allowed_domains;
	uint32_t			tiling_flags;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	uint32_t			flags;
	struct radeon_fence		*last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex */
	struct interval_tree_node	it;
	struct list_head		vm_status;

	/* constant after initialization */
	struct radeon_vm		*vm;
	struct radeon_bo		*bo;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				initial_domain;
	struct ttm_place		placements[4];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u32				flags;
	unsigned			pin_count;
	void				*kptr;
	u32				tiling_flags;
	u32				pitch;
	int				surface_reg;
	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct radeon_device		*rdev;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	pid_t				pid;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)

int radeon_gem_debugfs_init(struct radeon_device *rdev);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffer or semaphore code, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at the
 * end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for each
 * sub-object until we reach object_offset + object_size >= alloc_size;
 * that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects share the
 * same alignment); see the illustrative sketch below the struct.
 */
struct radeon_sa_manager {
	wait_queue_head_t	wq;
	struct radeon_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[RADEON_NUM_RINGS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};
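
/*
 * Illustrative sketch (an assumption, not driver code) of the
 * end-of-buffer check described above: a new sub-allocation fits after
 * the last object when the tail of the managed buffer is big enough.
 */
static inline bool radeon_sa_example_fits_at_end(unsigned total_size,
						 unsigned last_object_offset,
						 unsigned last_object_size,
						 unsigned alloc_size)
{
	return total_size - (last_object_offset + last_object_size) >=
	       alloc_size;
}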

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct radeon_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct radeon_fence		*fence;
};

/*
 * GEM objects.
 */
struct radeon_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj);

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
struct radeon_semaphore {
	struct radeon_sa_bo	*sa_bo;
	signed			waiters;
	uint64_t		gpu_addr;
};

int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore);
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore);
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore **semaphore,
			   struct radeon_fence *fence);

/*
 * Synchronization
 */
struct radeon_sync {
	struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
	struct radeon_fence	*last_vm_update;
};

void radeon_sync_create(struct radeon_sync *sync);
void radeon_sync_fence(struct radeon_sync *sync,
		       struct radeon_fence *fence);
int radeon_sync_resv(struct radeon_device *rdev,
		     struct radeon_sync *sync,
		     struct dma_resv *resv,
		     bool shared);
int radeon_sync_rings(struct radeon_device *rdev,
		      struct radeon_sync *sync,
		      int waiting_ring);
void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
		      struct radeon_fence *fence);
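
/*
 * Illustrative sketch (an assumption, not part of the driver): the
 * usual radeon_sync flow around a submission on @waiting_ring.
 */
static inline int radeon_sync_example(struct radeon_device *rdev,
				      struct radeon_fence *dependency,
				      int waiting_ring)
{
	struct radeon_sync sync;
	int r;

	radeon_sync_create(&sync);		/* reset the tracker */
	radeon_sync_fence(&sync, dependency);	/* record a fence to wait on */
	r = radeon_sync_rings(rdev, &sync, waiting_ring); /* emit waits */
	/* ... emit the actual commands on @waiting_ring here ... */
	radeon_sync_free(rdev, &sync, NULL);	/* release the semaphores */
	return r;
}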

/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
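/*
 * Example: RADEON_GPU_PAGE_ALIGN() rounds a byte size up to the next
 * 4 KiB GPU page boundary, e.g. RADEON_GPU_PAGE_ALIGN(4097) == 8192
 * while RADEON_GPU_PAGE_ALIGN(4096) == 4096.
 */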

#define RADEON_GART_PAGE_DUMMY  0
#define RADEON_GART_PAGE_VALID	(1 << 0)
#define RADEON_GART_PAGE_READ	(1 << 1)
#define RADEON_GART_PAGE_WRITE	(1 << 2)
#define RADEON_GART_PAGE_SNOOP	(1 << 3)

struct radeon_gart {
	dma_addr_t			table_addr;
	struct radeon_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	uint64_t			*pages_entry;
	bool				ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);
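
/*
 * Illustrative sketch (an assumption, not driver code): binding a
 * single CPU page at a GART byte offset with read/write access, then
 * unbinding it again.
 */
static inline int radeon_gart_example_bind_one(struct radeon_device *rdev,
					       unsigned offset,
					       struct page *page,
					       dma_addr_t addr)
{
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
			 RADEON_GART_PAGE_WRITE;
	int r;

	r = radeon_gart_bind(rdev, offset, 1, &page, &addr, flags);
	if (r)
		return r;
	radeon_gart_unbind(rdev, offset, 1);	/* undo the mapping */
	return 0;
}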
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  * GPU MC structures, functions & helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) struct radeon_mc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	resource_size_t		aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	resource_size_t		aper_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	resource_size_t		agp_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	/* for some chips with <= 32MB we need to lie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	 * about vram size near mc fb location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	u64			mc_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	u64			visible_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	u64			gtt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	u64			gtt_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	u64			gtt_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	u64			vram_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	u64			vram_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	unsigned		vram_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	u64			real_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	int			vram_mtrr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	bool			vram_is_ddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	bool			igp_sideport_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	u64                     gtt_base_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	u64                     mc_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) bool radeon_combios_sideport_present(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) bool radeon_atombios_sideport_present(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707)  * GPU scratch registers structures, functions & helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) struct radeon_scratch {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	unsigned		num_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	uint32_t                reg_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	bool			free[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	uint32_t		reg[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  * GPU doorbell structures, functions & helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) #define RADEON_MAX_DOORBELLS 1024	/* Reserve at most 1024 doorbell slots for radeon-owned rings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) struct radeon_doorbell {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	/* doorbell mmio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	resource_size_t		base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	resource_size_t		size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	u32 __iomem		*ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	u32			num_doorbells;	/* Number of doorbells actually reserved for radeon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
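/*
 * Hypothetical usage sketch (an assumption, not from the original header):
 * radeon_doorbell_get() hands out an index into the doorbell BAR, and a
 * ring is kicked by writing its new write pointer through the mapping in
 * rdev->doorbell.ptr, similar to what the CIK doorbell helpers do.
 */
static inline int radeon_doorbell_example(struct radeon_device *rdev, u32 wptr)
{
	u32 index;
	int r;

	r = radeon_doorbell_get(rdev, &index);	/* find a free bit in 'used' */
	if (r)
		return r;
	writel(wptr, rdev->doorbell.ptr + index);	/* ring the doorbell */
	radeon_doorbell_free(rdev, index);
	return 0;
}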
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  * IRQS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) struct radeon_flip_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	struct work_struct		flip_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	struct work_struct		unpin_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct radeon_device		*rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	int				crtc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	u32				target_vblank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	uint64_t			base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	struct drm_pending_vblank_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	struct radeon_bo		*old_rbo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	struct dma_fence		*fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	bool				async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) struct r500_irq_stat_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	u32 disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	u32 hdmi0_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) struct r600_irq_stat_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	u32 disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	u32 disp_int_cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	u32 disp_int_cont2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	u32 d1grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	u32 d2grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	u32 hdmi0_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	u32 hdmi1_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) struct evergreen_irq_stat_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	u32 disp_int[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	u32 grph_int[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	u32 afmt_status[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) struct cik_irq_stat_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	u32 disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	u32 disp_int_cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	u32 disp_int_cont2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	u32 disp_int_cont3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	u32 disp_int_cont4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	u32 disp_int_cont5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	u32 disp_int_cont6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	u32 d1grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	u32 d2grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	u32 d3grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	u32 d4grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	u32 d5grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	u32 d6grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) union radeon_irq_stat_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	struct r500_irq_stat_regs r500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	struct r600_irq_stat_regs r600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	struct evergreen_irq_stat_regs evergreen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct cik_irq_stat_regs cik;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) struct radeon_irq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	bool				installed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	spinlock_t			lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	atomic_t			ring_int[RADEON_NUM_RINGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	bool				crtc_vblank_int[RADEON_MAX_CRTCS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	atomic_t			pflip[RADEON_MAX_CRTCS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	wait_queue_head_t		vblank_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	bool				hpd[RADEON_MAX_HPD_PINS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	union radeon_irq_stat_regs	stat_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	bool				dpm_thermal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) int radeon_irq_kms_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) void radeon_irq_kms_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * CP & rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) struct radeon_ib {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	struct radeon_sa_bo		*sa_bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	uint32_t			length_dw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	uint64_t			gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	uint32_t			*ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	int				ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct radeon_fence		*fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	struct radeon_vm		*vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	bool				is_const_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct radeon_sync		sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) struct radeon_ring {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct radeon_bo	*ring_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	volatile uint32_t	*ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	unsigned		rptr_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	unsigned		rptr_save_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	u64			next_rptr_gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	volatile u32		*next_rptr_cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	unsigned		wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	unsigned		wptr_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	unsigned		ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	unsigned		ring_free_dw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	int			count_dw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	atomic_t		last_rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	atomic64_t		last_activity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	uint64_t		gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	uint32_t		align_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	uint32_t		ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	bool			ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	u32			nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	u32			idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	u64			last_semaphore_signal_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	u64			last_semaphore_wait_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	/* for CIK queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	u32 me;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	u32 pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	u32 queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	struct radeon_bo	*mqd_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	u32 doorbell_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	unsigned		wptr_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) struct radeon_mec {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct radeon_bo	*hpd_eop_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	u64			hpd_eop_gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	u32 num_pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	u32 num_mec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	u32 num_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  * VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) /* maximum number of VMIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) #define RADEON_NUM_VM	16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) /* number of entries in page table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) #define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) /* PTBs (Page Table Blocks) need to be aligned to 32K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) #define RADEON_VM_PTB_ALIGN_SIZE   32768
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
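/*
 * Worked example (illustrative): RADEON_VM_PTB_ALIGN() rounds a size up
 * to the next 32K boundary, e.g. RADEON_VM_PTB_ALIGN(0x9000) ==
 * ((0x9000 + 0x7fff) & ~0x7fff) == 0x10000, while an already aligned
 * value such as 0x8000 is returned unchanged.
 */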
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) #define R600_PTE_VALID		(1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) #define R600_PTE_SYSTEM		(1 << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) #define R600_PTE_SNOOPED	(1 << 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) #define R600_PTE_READABLE	(1 << 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) #define R600_PTE_WRITEABLE	(1 << 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) /* PTE (Page Table Entry) fragment field for different page sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) #define R600_PTE_FRAG_4KB	(0 << 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) #define R600_PTE_FRAG_64KB	(4 << 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) #define R600_PTE_FRAG_256KB	(6 << 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) /* flags that need to be set so we can copy directly from the GART table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) #define R600_PTE_GART_MASK	( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				  R600_PTE_SYSTEM | R600_PTE_VALID )
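/*
 * Worked example (illustrative): R600_PTE_GART_MASK expands to
 * (1 << 5) | (1 << 6) | (1 << 1) | (1 << 0) == 0x63, i.e. a valid,
 * readable, writeable system page; a snooped GART entry would OR in
 * R600_PTE_SNOOPED on top of that, giving 0x67.
 */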
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) struct radeon_vm_pt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct radeon_bo		*bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	uint64_t			addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) struct radeon_vm_id {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	unsigned		id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	uint64_t		pd_gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	/* last flushed PD/PT update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	struct radeon_fence	*flushed_updates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/* last use of vmid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	struct radeon_fence	*last_id_use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) struct radeon_vm {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct mutex		mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	struct rb_root_cached	va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	/* protects the invalidated and freed lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	spinlock_t		status_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/* BOs moved, but not yet updated in the PT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	struct list_head	invalidated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	/* BOs freed, but not yet updated in the PT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	struct list_head	freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* BOs cleared in the PT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct list_head	cleared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	/* contains the page directory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	struct radeon_bo	*page_directory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	unsigned		max_pde_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/* array of page tables, one for each page directory entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct radeon_vm_pt	*page_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	struct radeon_bo_va	*ib_bo_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	/* for id and flush management per ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	struct radeon_vm_id	ids[RADEON_NUM_RINGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) struct radeon_vm_manager {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct radeon_fence		*active[RADEON_NUM_VM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	uint32_t			max_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	/* number of VMIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	unsigned			nvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	/* vram base address for page table entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	u64				vram_base_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	/* is vm enabled? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	bool				enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	/* for hw to save the PD addr on suspend/resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	uint32_t			saved_table_addr[RADEON_NUM_VM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  * file private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) struct radeon_fpriv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct radeon_vm		vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * R6xx+ IH ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) struct r600_ih {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct radeon_bo	*ring_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	volatile uint32_t	*ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	unsigned		rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	unsigned		ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	uint64_t		gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	uint32_t		ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	atomic_t		lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	bool                    enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  * RLC stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) #include "clearstate_defs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) struct radeon_rlc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	/* for power gating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	struct radeon_bo	*save_restore_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	uint64_t		save_restore_gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	volatile uint32_t	*sr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	const u32               *reg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	u32                     reg_list_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	/* for clear state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct radeon_bo	*clear_state_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	uint64_t		clear_state_gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	volatile uint32_t	*cs_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	const struct cs_section_def   *cs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	u32                     clear_state_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/* for cp tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	struct radeon_bo	*cp_table_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	uint64_t		cp_table_gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	volatile uint32_t	*cp_table_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	u32                     cp_table_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) int radeon_ib_get(struct radeon_device *rdev, int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		  struct radeon_ib *ib, struct radeon_vm *vm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		  unsigned size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		       struct radeon_ib *const_ib, bool hdp_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int radeon_ib_pool_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) void radeon_ib_pool_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int radeon_ib_ring_tests(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Ring access between begin & end cannot sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				      struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			bool hdp_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			       bool hdp_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) void radeon_ring_undo(struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) void radeon_ring_lockup_update(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			       struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			    uint32_t **data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			unsigned size, uint32_t *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		     unsigned rptr_offs, u32 nop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
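/*
 * Hypothetical usage sketch (not part of the original header): the
 * lock/commit pair brackets the non-sleeping section mentioned above;
 * space for 'ndw' dwords is reserved up front, dwords are emitted with
 * radeon_ring_write() (defined further down in this header), and the new
 * write pointer only becomes visible to the GPU at commit time.
 */
static inline int radeon_ring_nop_example(struct radeon_device *rdev,
					  struct radeon_ring *ring)
{
	int r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */

	if (r)
		return r;
	radeon_ring_write(ring, ring->nop);
	radeon_ring_write(ring, ring->nop);
	radeon_ring_unlock_commit(rdev, ring, false);	/* no HDP flush */
	return 0;
}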
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* r600 async dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) void r600_dma_stop(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int r600_dma_resume(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) void r600_dma_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) void cayman_dma_stop(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int cayman_dma_resume(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) void cayman_dma_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * CS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct radeon_cs_chunk {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	uint32_t		length_dw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	uint32_t		*kdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	void __user		*user_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct radeon_cs_parser {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct device		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct radeon_device	*rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct drm_file		*filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	/* chunks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	unsigned		nchunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	struct radeon_cs_chunk	*chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	uint64_t		*chunks_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	/* IB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	unsigned		idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	/* relocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	unsigned		nrelocs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct radeon_bo_list	*relocs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	struct radeon_bo_list	*vm_bos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct list_head	validated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	unsigned		dma_reloc_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	/* indices of various chunks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct radeon_cs_chunk  *chunk_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	struct radeon_cs_chunk  *chunk_relocs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	struct radeon_cs_chunk  *chunk_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	struct radeon_cs_chunk  *chunk_const_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct radeon_ib	ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct radeon_ib	const_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	void			*track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	unsigned		family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	int			parser_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	u32			cs_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	u32			ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	s32			priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	struct ww_acquire_ctx	ticket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
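/* Fetch dword 'idx' of the IB being parsed: from the kernel-side copy of
 * the chunk when one was made (kdata), otherwise through the CPU mapping
 * of the IB buffer itself.
 */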
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	struct radeon_cs_chunk *ibc = p->chunk_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (ibc->kdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return ibc->kdata[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	return p->ib.ptr[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct radeon_cs_packet {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	unsigned	idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	unsigned	type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	unsigned	reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	unsigned	opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	int		count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	unsigned	one_reg_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				      struct radeon_cs_packet *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 				      unsigned idx, unsigned reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 				      struct radeon_cs_packet *pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * AGP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) int radeon_agp_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) void radeon_agp_resume(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) void radeon_agp_suspend(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) void radeon_agp_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  * Writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct radeon_wb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	struct radeon_bo	*wb_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	volatile uint32_t	*wb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	uint64_t		gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	bool                    enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	bool                    use_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #define RADEON_WB_SCRATCH_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #define RADEON_WB_RING0_NEXT_RPTR 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) #define RADEON_WB_CP_RPTR_OFFSET 1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) #define RADEON_WB_CP1_RPTR_OFFSET 1280
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) #define RADEON_WB_CP2_RPTR_OFFSET 1536
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) #define R600_WB_DMA_RPTR_OFFSET   1792
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) #define R600_WB_IH_WPTR_OFFSET   2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) #define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) #define R600_WB_EVENT_OFFSET     3072
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) #define CIK_WB_CP1_WPTR_OFFSET     3328
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) #define CIK_WB_CP2_WPTR_OFFSET     3584
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) #define R600_WB_DMA_RING_TEST_OFFSET 3588
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) #define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
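/*
 * Illustrative note (an assumption based on how the driver uses these):
 * the offsets above are byte offsets into the page backing struct
 * radeon_wb, so the CPU-side view of e.g. the CP read pointer is a dword
 * load such as
 *
 *	rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET / 4]);
 *
 * while the GPU writes the same slot at gpu_addr + RADEON_WB_CP_RPTR_OFFSET.
 */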
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  * struct radeon_pm - power management data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  * @igp_sideport_mclk:  sideport memory clock MHz (rs690,rs740,rs780,rs880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  * @igp_system_mclk:    system clock MHz (rs690,rs740,rs780,rs880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  * @igp_ht_link_clk:    ht link clock MHz (rs690,rs740,rs780,rs880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * @sclk:               GPU clock MHz (core bandwidth depends on this clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  * @needed_bandwidth:   current bandwidth needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * It keeps track of various data needed to make power management decisions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  * Bandwidth need is used to determine the minimum clock of the GPU and memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  * The equation between gpu/memory clock and available bandwidth is hw dependent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * (type of memory, bus size, efficiency, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) enum radeon_pm_method {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	PM_METHOD_PROFILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	PM_METHOD_DYNPM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	PM_METHOD_DPM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) enum radeon_dynpm_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	DYNPM_STATE_DISABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	DYNPM_STATE_MINIMUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	DYNPM_STATE_PAUSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	DYNPM_STATE_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	DYNPM_STATE_SUSPENDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) enum radeon_dynpm_action {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	DYNPM_ACTION_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	DYNPM_ACTION_MINIMUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	DYNPM_ACTION_DOWNCLOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	DYNPM_ACTION_UPCLOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	DYNPM_ACTION_DEFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) enum radeon_voltage_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	VOLTAGE_NONE = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	VOLTAGE_GPIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	VOLTAGE_VDDC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	VOLTAGE_SW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) enum radeon_pm_state_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	/* not used for dpm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	POWER_STATE_TYPE_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	POWER_STATE_TYPE_POWERSAVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	/* user selectable states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	POWER_STATE_TYPE_BATTERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	POWER_STATE_TYPE_BALANCED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	POWER_STATE_TYPE_PERFORMANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/* internal states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	POWER_STATE_TYPE_INTERNAL_UVD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	POWER_STATE_TYPE_INTERNAL_UVD_SD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	POWER_STATE_TYPE_INTERNAL_UVD_HD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	POWER_STATE_TYPE_INTERNAL_BOOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	POWER_STATE_TYPE_INTERNAL_THERMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	POWER_STATE_TYPE_INTERNAL_ACPI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	POWER_STATE_TYPE_INTERNAL_ULV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	POWER_STATE_TYPE_INTERNAL_3DPERF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) enum radeon_pm_profile_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	PM_PROFILE_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	PM_PROFILE_AUTO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	PM_PROFILE_LOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	PM_PROFILE_MID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	PM_PROFILE_HIGH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) #define PM_PROFILE_DEFAULT_IDX 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) #define PM_PROFILE_LOW_SH_IDX  1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #define PM_PROFILE_MID_SH_IDX  2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) #define PM_PROFILE_HIGH_SH_IDX 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) #define PM_PROFILE_LOW_MH_IDX  4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) #define PM_PROFILE_MID_MH_IDX  5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #define PM_PROFILE_HIGH_MH_IDX 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) #define PM_PROFILE_MAX         7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct radeon_pm_profile {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	int dpms_off_ps_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	int dpms_on_ps_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	int dpms_off_cm_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	int dpms_on_cm_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) enum radeon_int_thermal_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	THERMAL_TYPE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	THERMAL_TYPE_EXTERNAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	THERMAL_TYPE_EXTERNAL_GPIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	THERMAL_TYPE_RV6XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	THERMAL_TYPE_RV770,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	THERMAL_TYPE_EVERGREEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	THERMAL_TYPE_SUMO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	THERMAL_TYPE_NI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	THERMAL_TYPE_SI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	THERMAL_TYPE_CI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	THERMAL_TYPE_KV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct radeon_voltage {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	enum radeon_voltage_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	/* gpio voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct radeon_gpio_rec gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	u32 delay; /* delay in usec from voltage drop to sclk change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	bool active_high; /* voltage drop is active when bit is high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	/* VDDC voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	u8 vddc_id; /* index into vddc voltage table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	u8 vddci_id; /* index into vddci voltage table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	bool vddci_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	/* r6xx+ sw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	u16 voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	/* evergreen+ vddci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	u16 vddci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* clock mode flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) #define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct radeon_pm_clock_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	/* memory clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	u32 mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	/* engine clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	u32 sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	/* voltage info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	struct radeon_voltage voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	/* standardized clock flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* state flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) #define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct radeon_power_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	enum radeon_pm_state_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	struct radeon_pm_clock_info *clock_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	/* number of valid clock modes in this power state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	int num_clock_modes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	struct radeon_pm_clock_info *default_clock_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	/* standardized state flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	u32 misc; /* vbios specific flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	u32 misc2; /* vbios specific flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	int pcie_lanes; /* pcie lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  * Some modes are overclocked by a very small amount; accept them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) #define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
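/*
 * Worked example (clocks here are in 10 kHz units, so 500 == 5 MHz): a
 * mode needing an sclk of 80250 (802.5 MHz) is still accepted against an
 * 80000 (800 MHz) limit, since 80250 <= 80000 + RADEON_MODE_OVERCLOCK_MARGIN.
 */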
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) enum radeon_dpm_auto_throttle_src {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) enum radeon_dpm_event_src {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	RADEON_DPM_EVENT_SRC_ANALOG = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	RADEON_DPM_EVENT_SRC_DIGITAL = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) #define RADEON_MAX_VCE_LEVELS 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) enum radeon_vce_level {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	RADEON_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	RADEON_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	RADEON_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	RADEON_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct radeon_ps {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	u32 caps; /* vbios flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	u32 class; /* vbios flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	u32 class2; /* vbios flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	/* UVD clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	u32 vclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	u32 dclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	/* VCE clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	u32 evclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	u32 ecclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	bool vce_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	enum radeon_vce_level vce_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	/* asic priv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	void *ps_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct radeon_dpm_thermal {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	/* thermal interrupt work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	/* low temperature threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	int                min_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	/* high temperature threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	int                max_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* was the interrupt transition low-to-high or high-to-low */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	bool               high_to_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) enum radeon_clk_action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	RADEON_SCLK_UP = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	RADEON_SCLK_DOWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct radeon_blacklist_clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	u32 sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	u32 mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	enum radeon_clk_action action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct radeon_clock_and_voltage_limits {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	u32 sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	u32 mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	u16 vddc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	u16 vddci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct radeon_clock_array {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	u32 *values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct radeon_clock_voltage_dependency_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	u32 clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	u16 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct radeon_clock_voltage_dependency_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	struct radeon_clock_voltage_dependency_entry *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) union radeon_cac_leakage_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		u16 vddc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		u32 leakage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		u16 vddc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		u16 vddc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		u16 vddc3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct radeon_cac_leakage_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	union radeon_cac_leakage_entry *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct radeon_phase_shedding_limits_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	u16 voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	u32 sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	u32 mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct radeon_phase_shedding_limits_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	struct radeon_phase_shedding_limits_entry *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct radeon_uvd_clock_voltage_dependency_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	u32 vclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	u32 dclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	u16 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct radeon_uvd_clock_voltage_dependency_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	u8 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	struct radeon_uvd_clock_voltage_dependency_entry *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct radeon_vce_clock_voltage_dependency_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	u32 ecclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	u32 evclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	u16 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct radeon_vce_clock_voltage_dependency_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	u8 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	struct radeon_vce_clock_voltage_dependency_entry *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct radeon_ppm_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	u8 ppm_design;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	u16 cpu_core_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	u32 platform_tdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	u32 small_ac_platform_tdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	u32 platform_tdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	u32 small_ac_platform_tdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	u32 apu_tdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	u32 dgpu_tdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	u32 dgpu_ulv_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	u32 tj_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct radeon_cac_tdp_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	u16 tdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	u16 configurable_tdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	u16 tdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	u16 battery_power_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	u16 small_power_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	u16 low_cac_leakage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	u16 high_cac_leakage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	u16 maximum_power_delivery_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct radeon_dpm_dynamic_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	struct radeon_clock_array valid_sclk_values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	struct radeon_clock_array valid_mclk_values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	u32 mclk_sclk_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	u32 sclk_mclk_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	u16 vddc_vddci_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	u16 min_vddc_for_pcie_gen2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	struct radeon_cac_leakage_table cac_leakage_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	struct radeon_ppm_table *ppm_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	struct radeon_cac_tdp_table *cac_tdp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct radeon_dpm_fan {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	u16 t_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	u16 t_med;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	u16 t_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	u16 pwm_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	u16 pwm_med;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	u16 pwm_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	u8 t_hyst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	u32 cycle_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	u16 t_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	u8 control_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	u16 default_max_fan_pwm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	u16 default_fan_output_sensitivity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	u16 fan_output_sensitivity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	bool ucode_fan_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) enum radeon_pcie_gen {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	RADEON_PCIE_GEN1 = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	RADEON_PCIE_GEN2 = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	RADEON_PCIE_GEN3 = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	RADEON_PCIE_GEN_INVALID = 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) enum radeon_dpm_forced_level {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	RADEON_DPM_FORCED_LEVEL_AUTO = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	RADEON_DPM_FORCED_LEVEL_LOW = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	RADEON_DPM_FORCED_LEVEL_HIGH = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) };
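
/*
 * Illustrative sketch, not part of the driver: one plausible mapping of
 * a forced level to the string a sysfs interface might expose; the exact
 * strings the driver uses are defined elsewhere.
 */
static inline const char *example_dpm_forced_level_name(enum radeon_dpm_forced_level level)
{
	switch (level) {
	case RADEON_DPM_FORCED_LEVEL_LOW:
		return "low";
	case RADEON_DPM_FORCED_LEVEL_HIGH:
		return "high";
	default:
		return "auto";
	}
}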
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct radeon_vce_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	/* vce clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	u32 evclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	u32 ecclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	/* gpu clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	u32 sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	u32 mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	u8 clk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	u8 pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct radeon_dpm {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	struct radeon_ps        *ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	/* number of valid power states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	int                     num_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	/* currently active power state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	struct radeon_ps        *current_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	/* requested power state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	struct radeon_ps        *requested_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	/* boot up power state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	struct radeon_ps        *boot_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	/* default uvd power state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	struct radeon_ps        *uvd_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	/* vce requirements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	enum radeon_vce_level vce_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	enum radeon_pm_state_type state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	enum radeon_pm_state_type user_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	u32                     platform_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	u32                     voltage_response_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	u32                     backbias_response_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	void                    *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	u32			new_active_crtcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	int			new_active_crtc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	int			high_pixelclock_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	u32			current_active_crtcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	int			current_active_crtc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	bool single_display;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	struct radeon_dpm_dynamic_state dyn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	struct radeon_dpm_fan fan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	u32 tdp_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	u32 near_tdp_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	u32 near_tdp_limit_adjusted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	u32 sq_ramping_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	u32 cac_leakage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	u16 tdp_od_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	u32 tdp_adjustment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	u16 load_line_slope;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	bool power_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	bool ac_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	/* special states active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	bool                    thermal_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	bool                    uvd_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	bool                    vce_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	/* thermal handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	struct radeon_dpm_thermal thermal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	/* forced levels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	enum radeon_dpm_forced_level forced_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	/* track UVD streams */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	unsigned sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	unsigned hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) };
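
/*
 * Illustrative sketch, not part of the driver: new_active_crtcs is a
 * bitmask with one bit per CRTC, kept alongside its population count in
 * new_active_crtc_count, so a single CRTC can be tested like this.
 */
static inline bool example_dpm_crtc_is_active(const struct radeon_dpm *dpm, int crtc)
{
	return (dpm->new_active_crtcs & (1u << crtc)) != 0;
}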
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
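
/*
 * Illustrative sketch, not part of the driver: a caller powers the UVD
 * block up around a decode job and lets it gate again afterwards.
 */
static inline void example_uvd_power_cycle(struct radeon_device *rdev)
{
	radeon_dpm_enable_uvd(rdev, true);	/* clock/power up before decoding */
	/* ... submit and wait for decode work ... */
	radeon_dpm_enable_uvd(rdev, false);	/* allow power gating again */
}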
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct radeon_pm {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	struct mutex		mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	/* write locked while reprogramming mclk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct rw_semaphore	mclk_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	u32			active_crtcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	int			active_crtc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	int			req_vblank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	bool			vblank_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	fixed20_12		max_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	fixed20_12		igp_sideport_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	fixed20_12		igp_system_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	fixed20_12		igp_ht_link_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	fixed20_12		igp_ht_link_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	fixed20_12		k8_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	fixed20_12		sideport_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	fixed20_12		ht_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	fixed20_12		core_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	fixed20_12		sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	fixed20_12		mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	fixed20_12		needed_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	struct radeon_power_state *power_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	/* number of valid power states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	int                     num_power_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	int                     current_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	int                     current_clock_mode_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	int                     requested_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	int                     requested_clock_mode_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	int                     default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	u32                     current_sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	u32                     current_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	u16                     current_vddc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	u16                     current_vddci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	u32                     default_sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	u32                     default_mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	u16                     default_vddc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	u16                     default_vddci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct radeon_i2c_chan *i2c_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	/* selected pm method */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	enum radeon_pm_method     pm_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	/* dynpm power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	struct delayed_work	dynpm_idle_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	enum radeon_dynpm_state	dynpm_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	enum radeon_dynpm_action	dynpm_planned_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	unsigned long		dynpm_action_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	bool                    dynpm_can_upclock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	bool                    dynpm_can_downclock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/* profile-based power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	enum radeon_pm_profile_type profile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	int                     profile_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	/* internal thermal controller on rv6xx+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	enum radeon_int_thermal_type int_thermal_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	struct device	        *int_hwmon_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	/* fan control parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	bool                    no_fan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	u8                      fan_pulses_per_revolution;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	u8                      fan_min_rpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	u8                      fan_max_rpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	/* dpm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	bool                    dpm_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	bool                    sysfs_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	struct radeon_dpm       dpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) };
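
/*
 * Illustrative sketch, not part of the driver: mclk_lock is write-locked
 * while the memory clock is reprogrammed (see the comment above), so a
 * reader that needs a stable value takes the read side.
 */
static inline u32 example_pm_read_current_mclk(struct radeon_pm *pm)
{
	u32 mclk;

	down_read(&pm->mclk_lock);	/* exclude concurrent mclk reprogramming */
	mclk = pm->current_mclk;
	up_read(&pm->mclk_lock);
	return mclk;
}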
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) #define RADEON_PCIE_SPEED_25 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) #define RADEON_PCIE_SPEED_50 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) #define RADEON_PCIE_SPEED_80 4
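
/*
 * Illustrative sketch, not part of the driver: the RADEON_PCIE_SPEED_*
 * values form a bitmask of supported link speeds (2.5, 5.0 and 8.0 GT/s)
 * that maps onto enum radeon_pcie_gen roughly as follows.
 */
static inline enum radeon_pcie_gen example_pcie_speed_mask_to_gen(int mask)
{
	if (mask & RADEON_PCIE_SPEED_80)
		return RADEON_PCIE_GEN3;	/* 8.0 GT/s */
	if (mask & RADEON_PCIE_SPEED_50)
		return RADEON_PCIE_GEN2;	/* 5.0 GT/s */
	if (mask & RADEON_PCIE_SPEED_25)
		return RADEON_PCIE_GEN1;	/* 2.5 GT/s */
	return RADEON_PCIE_GEN_INVALID;
}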
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) int radeon_pm_get_type_index(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			     enum radeon_pm_state_type ps_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			     int instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)  * UVD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) #define RADEON_DEFAULT_UVD_HANDLES	10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) #define RADEON_MAX_UVD_HANDLES		30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) #define RADEON_UVD_STACK_SIZE		(200*1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) #define RADEON_UVD_HEAP_SIZE		(256*1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) #define RADEON_UVD_SESSION_SIZE		(50*1024)
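
/*
 * Illustrative sketch, not part of the driver: the UVD VCPU buffer is
 * sized from the constants above with one session slot per handle, so
 * the default 10 handles add 10 * 50 KiB = 500 KiB of session space on
 * top of the firmware image, stack and heap (the real allocation also
 * page-aligns the firmware size).
 */
static inline unsigned long example_uvd_vcpu_bo_size(unsigned long fw_size,
						     unsigned max_handles)
{
	return fw_size + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
	       (unsigned long)RADEON_UVD_SESSION_SIZE * max_handles;
}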
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct radeon_uvd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	bool			fw_header_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	struct radeon_bo	*vcpu_bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	void			*cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	uint64_t		gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	unsigned		max_handles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	unsigned		img_size[RADEON_MAX_UVD_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	struct delayed_work	idle_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) int radeon_uvd_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) void radeon_uvd_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) int radeon_uvd_suspend(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) int radeon_uvd_resume(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			      uint32_t handle, struct radeon_fence **fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			       uint32_t handle, struct radeon_fence **fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 				       uint32_t allowed_domains);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) void radeon_uvd_free_handles(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			     struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) void radeon_uvd_note_usage(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				  unsigned vclk, unsigned dclk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 				  unsigned vco_min, unsigned vco_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				  unsigned fb_factor, unsigned fb_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				  unsigned pd_min, unsigned pd_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				  unsigned pd_even,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				  unsigned *optimal_fb_div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				  unsigned *optimal_vclk_div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 				  unsigned *optimal_dclk_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				unsigned cg_upll_func_cntl);
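
/*
 * Illustrative sketch, not part of the driver: a caller asks for the
 * feedback and post dividers that hit the requested VCLK/DCLK while
 * keeping the VCO in range.  All numeric limits below are placeholders,
 * not the real constraints of any ASIC.
 */
#if 0	/* example only */
	unsigned fb_div, vclk_div, dclk_div;
	int r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk,
					      125000, 250000,	/* vco_min, vco_max */
					      16, 0x07ff,	/* fb_factor, fb_mask */
					      6, 12, 1,		/* pd_min, pd_max, pd_even */
					      &fb_div, &vclk_div, &dclk_div);
#endif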
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)  * VCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) #define RADEON_MAX_VCE_HANDLES	16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct radeon_vce {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	struct radeon_bo	*vcpu_bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	uint64_t		gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	unsigned		fw_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	unsigned		fb_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	unsigned		img_size[RADEON_MAX_VCE_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	struct delayed_work	idle_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	uint32_t		keyselect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) };
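
/*
 * Illustrative sketch, not part of the driver: handles[] is a small
 * table of atomics, so a free slot can be claimed with atomic_cmpxchg()
 * and no additional locking.
 */
static inline int example_vce_claim_handle(struct radeon_vce *vce, uint32_t handle)
{
	int i;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
		if (atomic_cmpxchg(&vce->handles[i], 0, handle) == 0)
			return i;	/* slot i now owns this handle */
	return -1;			/* table full */
}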
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) int radeon_vce_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) void radeon_vce_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) int radeon_vce_suspend(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) int radeon_vce_resume(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 			      uint32_t handle, struct radeon_fence **fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			       uint32_t handle, struct radeon_fence **fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) void radeon_vce_note_usage(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int radeon_vce_cs_parse(struct radeon_cs_parser *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			       struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			       struct radeon_semaphore *semaphore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			       bool emit_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) void radeon_vce_fence_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			   struct radeon_fence *fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct r600_audio_pin {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	int			channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	int			rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	int			bits_per_sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	u8			status_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	u8			category_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	u32			offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	bool			connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	u32			id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct r600_audio {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	bool enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	int num_pins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	struct radeon_audio_funcs *hdmi_funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	struct radeon_audio_funcs *dp_funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	struct radeon_audio_basic_funcs *funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)  * Benchmarking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) void radeon_benchmark(struct radeon_device *rdev, int test_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  * Testing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) void radeon_test_moves(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) void radeon_test_ring_sync(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			   struct radeon_ring *cpA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			   struct radeon_ring *cpB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) void radeon_test_syncing(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  * MMU Notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) #if defined(CONFIG_MMU_NOTIFIER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) void radeon_mn_unregister(struct radeon_bo *bo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) #endif
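
/*
 * Illustrative sketch, not part of the driver: when CONFIG_MMU_NOTIFIER
 * is disabled the stub above returns -ENODEV, so userptr registration
 * has a single failure path either way.
 */
static inline int example_track_userptr(struct radeon_bo *bo, unsigned long addr)
{
	int r = radeon_mn_register(bo, addr);

	if (r)	/* -ENODEV when the kernel lacks MMU notifiers */
		return r;
	/* ... the bo now follows invalidations of the user mapping ... */
	radeon_mn_unregister(bo);
	return 0;
}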
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * Debugfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct radeon_debugfs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	struct drm_info_list	*files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	unsigned		num_files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int radeon_debugfs_add_files(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			     struct drm_info_list *files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			     unsigned nfiles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) int radeon_debugfs_fence_init(struct radeon_device *rdev);
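
/*
 * Illustrative sketch, not part of the driver: a debugfs entry is
 * described by a drm_info_list and registered through
 * radeon_debugfs_add_files().  The names below are made up.
 */
#if 0	/* example only */
static int example_info_show(struct seq_file *m, void *data)
{
	seq_puts(m, "example\n");
	return 0;
}

static struct drm_info_list example_info_list[] = {
	{ "radeon_example", example_info_show, 0, NULL },
};

/* ... radeon_debugfs_add_files(rdev, example_info_list, 1); ... */
#endif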
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * ASIC ring specific functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct radeon_asic_ring {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	/* ring read/write ptr handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	/* validating and patching of IBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	int (*cs_parse)(struct radeon_cs_parser *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	/* command emit functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			       struct radeon_semaphore *semaphore, bool emit_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			 unsigned vm_id, uint64_t pd_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	/* testing functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	/* deprecated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) };
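
/*
 * Illustrative sketch, not part of the driver: radeon_asic.c fills one
 * of these per ring with designated initializers; every callback name
 * below is hypothetical.
 */
#if 0	/* example only */
static const struct radeon_asic_ring example_gfx_ring = {
	.get_rptr = &example_gfx_get_rptr,
	.get_wptr = &example_gfx_get_wptr,
	.set_wptr = &example_gfx_set_wptr,
	.ib_execute = &example_gfx_ib_execute,
	.emit_fence = &example_gfx_emit_fence,
	.ring_test = &example_gfx_ring_test,
	.ib_test = &example_gfx_ib_test,
	.is_lockup = &example_gfx_is_lockup,
};
#endif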
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  * ASIC specific functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct radeon_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	int (*init)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	void (*fini)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	int (*resume)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	int (*suspend)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	int (*asic_reset)(struct radeon_device *rdev, bool hard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	/* Flush the HDP cache via MMIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	void (*mmio_hdp_flush)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	/* check if 3D engine is idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	bool (*gui_idle)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	/* wait for mc_idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	int (*mc_wait_for_idle)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	/* get the reference clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	u32 (*get_xclk)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	/* get the gpu clock counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	/* get register for info ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	/* gart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		void (*tlb_flush)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		void (*set_page)(struct radeon_device *rdev, unsigned i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 				 uint64_t entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	} gart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		int (*init)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		void (*fini)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		void (*copy_pages)(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 				   struct radeon_ib *ib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 				   uint64_t pe, uint64_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 				   unsigned count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		void (*write_pages)(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 				    struct radeon_ib *ib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 				    uint64_t pe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 				    uint64_t addr, unsigned count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 				    uint32_t incr, uint32_t flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		void (*set_pages)(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 				  struct radeon_ib *ib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 				  uint64_t pe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 				  uint64_t addr, unsigned count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 				  uint32_t incr, uint32_t flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		void (*pad_ib)(struct radeon_ib *ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	} vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	/* ring specific callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	const struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	/* irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		int (*set)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		int (*process)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	} irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	/* displays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		/* display watermarks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		void (*bandwidth_update)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		/* get frame count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		/* wait for vblank */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		/* set backlight level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		/* get backlight level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		/* audio callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	} display;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	/* copy functions for bo handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		struct radeon_fence *(*blit)(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 					     uint64_t src_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 					     uint64_t dst_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 					     unsigned num_gpu_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 					     struct dma_resv *resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		u32 blit_ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		struct radeon_fence *(*dma)(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 					    uint64_t src_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 					    uint64_t dst_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 					    unsigned num_gpu_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 					    struct dma_resv *resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		u32 dma_ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		/* method used for bo copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		struct radeon_fence *(*copy)(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 					     uint64_t src_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 					     uint64_t dst_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 					     unsigned num_gpu_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 					     struct dma_resv *resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		/* ring used for bo copies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		u32 copy_ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	} copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	/* surfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		int (*set_reg)(struct radeon_device *rdev, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 				       uint32_t tiling_flags, uint32_t pitch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 				       uint32_t offset, uint32_t obj_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		void (*clear_reg)(struct radeon_device *rdev, int reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	} surface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	/* hotplug detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		void (*init)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		void (*fini)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	} hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	/* static power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		void (*misc)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		void (*prepare)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		void (*finish)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		void (*init_profile)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		void (*get_dynpm_state)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		int (*get_pcie_lanes)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		int (*get_temperature)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	} pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	/* dynamic power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		int (*init)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		void (*setup_asic)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		int (*enable)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		int (*late_enable)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		void (*disable)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		int (*pre_set_power_state)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		int (*set_power_state)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		void (*post_set_power_state)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		void (*display_configuration_changed)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		void (*fini)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		u32 (*get_sclk)(struct radeon_device *rdev, bool low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		u32 (*get_mclk)(struct radeon_device *rdev, bool low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		bool (*vblank_too_short)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		void (*enable_bapm)(struct radeon_device *rdev, bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		u32 (*get_current_sclk)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		u32 (*get_current_mclk)(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	} dpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	/* pageflipping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	} pflip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) };
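
/*
 * Illustrative sketch, not part of the driver: callers normally reach
 * these callbacks through thin wrapper macros so the per-ASIC
 * indirection lives in one place; a representative wrapper could be:
 */
#if 0	/* example only */
#define example_asic_reset(rdev, hard) \
	((rdev)->asic->asic_reset((rdev), (hard)))
#endif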
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  * Asic structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct r100_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	const unsigned		*reg_safe_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	unsigned		reg_safe_bm_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	u32			hdp_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct r300_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	const unsigned		*reg_safe_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	unsigned		reg_safe_bm_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	u32			resync_scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	u32			hdp_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct r600_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	unsigned		max_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	unsigned		max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	unsigned		max_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	unsigned		max_backends;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	unsigned		max_gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	unsigned		max_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	unsigned		max_stack_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	unsigned		max_hw_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	unsigned		max_gs_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	unsigned		sx_max_export_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	unsigned		sx_max_export_pos_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	unsigned		sx_max_export_smx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	unsigned		sq_num_cf_insts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	unsigned		tiling_nbanks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	unsigned		tiling_npipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	unsigned		tiling_group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	unsigned		tile_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	unsigned		backend_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	unsigned		active_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct rv770_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	unsigned		max_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	unsigned		max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	unsigned		max_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	unsigned		max_backends;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	unsigned		max_gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	unsigned		max_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	unsigned		max_stack_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	unsigned		max_hw_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	unsigned		max_gs_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	unsigned		sx_max_export_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	unsigned		sx_max_export_pos_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	unsigned		sx_max_export_smx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	unsigned		sq_num_cf_insts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	unsigned		sx_num_of_sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	unsigned		sc_prim_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	unsigned		sc_hiz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	unsigned		sc_earlyz_tile_fifo_fize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	unsigned		tiling_nbanks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	unsigned		tiling_npipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	unsigned		tiling_group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	unsigned		tile_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	unsigned		backend_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	unsigned		active_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct evergreen_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	unsigned num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	unsigned max_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	unsigned max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	unsigned max_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	unsigned max_backends;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	unsigned max_gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	unsigned max_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	unsigned max_stack_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	unsigned max_hw_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	unsigned max_gs_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	unsigned sx_max_export_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	unsigned sx_max_export_pos_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	unsigned sx_max_export_smx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	unsigned sq_num_cf_insts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	unsigned sx_num_of_sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	unsigned sc_prim_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	unsigned sc_hiz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	unsigned sc_earlyz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	unsigned tiling_nbanks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	unsigned tiling_npipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	unsigned tiling_group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	unsigned tile_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	unsigned backend_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	unsigned active_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct cayman_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	unsigned max_shader_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	unsigned max_pipes_per_simd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	unsigned max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	unsigned max_simds_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	unsigned max_backends_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	unsigned max_texture_channel_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	unsigned max_gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	unsigned max_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	unsigned max_gs_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	unsigned max_stack_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	unsigned sx_num_of_sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	unsigned sx_max_export_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	unsigned sx_max_export_pos_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	unsigned sx_max_export_smx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	unsigned max_hw_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	unsigned sq_num_cf_insts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	unsigned sc_prim_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	unsigned sc_hiz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	unsigned sc_earlyz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	unsigned num_shader_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	unsigned num_shader_pipes_per_simd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	unsigned num_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	unsigned num_simds_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	unsigned num_backends_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	unsigned backend_disable_mask_per_asic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	unsigned backend_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	unsigned num_texture_channel_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	unsigned mem_max_burst_length_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	unsigned mem_row_size_in_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	unsigned shader_engine_tile_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	unsigned num_gpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	unsigned multi_gpu_tile_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	unsigned tile_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	unsigned active_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct si_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	unsigned max_shader_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	unsigned max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	unsigned max_cu_per_sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	unsigned max_sh_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	unsigned max_backends_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	unsigned max_texture_channel_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	unsigned max_gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	unsigned max_gs_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	unsigned max_hw_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	unsigned sc_prim_fifo_size_frontend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	unsigned sc_prim_fifo_size_backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	unsigned sc_hiz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	unsigned sc_earlyz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	unsigned num_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	unsigned backend_enable_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	unsigned backend_disable_mask_per_asic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	unsigned backend_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	unsigned num_texture_channel_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	unsigned mem_max_burst_length_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	unsigned mem_row_size_in_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	unsigned shader_engine_tile_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	unsigned num_gpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	unsigned multi_gpu_tile_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	unsigned tile_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	uint32_t tile_mode_array[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	uint32_t active_cus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) struct cik_asic {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	unsigned max_shader_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	unsigned max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	unsigned max_cu_per_sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	unsigned max_sh_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	unsigned max_backends_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	unsigned max_texture_channel_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	unsigned max_gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	unsigned max_gs_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	unsigned max_hw_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	unsigned sc_prim_fifo_size_frontend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	unsigned sc_prim_fifo_size_backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	unsigned sc_hiz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	unsigned sc_earlyz_tile_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	unsigned num_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	unsigned backend_enable_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	unsigned backend_disable_mask_per_asic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	unsigned backend_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	unsigned num_texture_channel_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	unsigned mem_max_burst_length_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	unsigned mem_row_size_in_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	unsigned shader_engine_tile_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	unsigned num_gpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	unsigned multi_gpu_tile_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	unsigned tile_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	uint32_t tile_mode_array[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	uint32_t macrotile_mode_array[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	uint32_t active_cus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) union radeon_asic_config {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	struct r300_asic	r300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	struct r100_asic	r100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	struct r600_asic	r600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	struct rv770_asic	rv770;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	struct evergreen_asic	evergreen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	struct cayman_asic	cayman;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	struct si_asic		si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	struct cik_asic		cik;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) };
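
/*
 * Illustrative sketch, not part of the driver: only the union member
 * matching the detected family holds valid data, e.g. SI-specific code
 * reads the si view.
 */
#if 0	/* example only */
	unsigned cu_per_sh = rdev->config.si.max_cu_per_sh;	/* SI only */
#endif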
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)  * asic initialization from radeon_asic.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) void radeon_agp_disable(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) int radeon_asic_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)  * IOCTL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			  struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			    struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 			     struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			 struct drm_file *file_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			   struct drm_file *file_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 			    struct drm_file *file_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 			   struct drm_file *file_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 				struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 			  struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 			  struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 			      struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			  struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 				struct drm_file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 				struct drm_file *filp);
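/*
 * Hook-up sketch (illustrative, not part of this header): these handlers are
 * registered in the driver's DRM ioctl table (radeon_kms.c in mainline), e.g.:
 *
 *	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl,
 *			  DRM_AUTH | DRM_RENDER_ALLOW),
 */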
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* VRAM scratch page for HDP bug, default vram page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct r600_vram_scratch {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	struct radeon_bo		*robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	volatile uint32_t		*ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	u64				gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)  * ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) struct radeon_atif_notification_cfg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	bool enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	int command_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) struct radeon_atif_notifications {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	bool display_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	bool expansion_mode_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	bool thermal_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	bool forced_power_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	bool system_power_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	bool display_conf_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	bool px_gfx_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	bool brightness_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	bool dgpu_display_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) struct radeon_atif_functions {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	bool system_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	bool sbios_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	bool select_active_disp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	bool lid_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	bool get_tv_standard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	bool set_tv_standard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	bool get_panel_expansion_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	bool set_panel_expansion_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	bool temperature_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	bool graphics_device_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) struct radeon_atif {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	struct radeon_atif_notifications notifications;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	struct radeon_atif_functions functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	struct radeon_atif_notification_cfg notification_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	struct radeon_encoder *encoder_for_bl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) struct radeon_atcs_functions {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	bool get_ext_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	bool pcie_perf_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	bool pcie_dev_rdy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	bool pcie_bus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) struct radeon_atcs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	struct radeon_atcs_functions functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)  * Core structure, functions and helpers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) struct radeon_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	struct device			*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	struct drm_device		*ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	struct pci_dev			*pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	struct rw_semaphore		exclusive_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	/* ASIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	union radeon_asic_config	config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	enum radeon_family		family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	unsigned long			flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	int				usec_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	enum radeon_pll_errata		pll_errata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	int				num_gb_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	int				num_z_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	int				disp_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	/* BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	uint8_t				*bios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	bool				is_atom_bios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	uint16_t			bios_header_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	struct radeon_bo		*stolen_vga_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	/* Register mmio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	resource_size_t			rmmio_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	resource_size_t			rmmio_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	/* protects concurrent MM_INDEX/DATA based register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	spinlock_t mmio_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	/* protects concurrent SMC based register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	spinlock_t smc_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	/* protects concurrent PLL register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	spinlock_t pll_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	/* protects concurrent MC register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	spinlock_t mc_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	/* protects concurrent PCIE register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	spinlock_t pcie_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	/* protects concurrent PCIE_PORT register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	spinlock_t pciep_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	/* protects concurrent PIF register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	spinlock_t pif_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/* protects concurrent CG register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	spinlock_t cg_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	/* protects concurrent UVD register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	spinlock_t uvd_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	/* protects concurrent RCU register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	spinlock_t rcu_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	/* protects concurrent DIDT register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	spinlock_t didt_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	/* protects concurrent ENDPOINT (audio) register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	spinlock_t end_idx_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	void __iomem			*rmmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	radeon_rreg_t			mc_rreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	radeon_wreg_t			mc_wreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	radeon_rreg_t			pll_rreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	radeon_wreg_t			pll_wreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	uint32_t                        pcie_reg_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	radeon_rreg_t			pciep_rreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	radeon_wreg_t			pciep_wreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	/* io port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	void __iomem                    *rio_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	resource_size_t			rio_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	struct radeon_clock             clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	struct radeon_mc		mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	struct radeon_gart		gart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	struct radeon_mode_info		mode_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	struct radeon_scratch		scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	struct radeon_doorbell		doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	struct radeon_mman		mman;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	wait_queue_head_t		fence_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	u64				fence_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	struct mutex			ring_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	struct radeon_ring		ring[RADEON_NUM_RINGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	bool				ib_pool_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	struct radeon_sa_manager	ring_tmp_bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	struct radeon_irq		irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	struct radeon_asic		*asic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	struct radeon_gem		gem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	struct radeon_pm		pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	struct radeon_uvd		uvd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	struct radeon_vce		vce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	struct radeon_wb		wb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	struct radeon_dummy_page	dummy_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	bool				shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	bool				need_swiotlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	bool				accel_working;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	bool				fastfb_working; /* IGP feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	bool				needs_reset, in_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	const struct firmware *me_fw;	/* all family ME firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	const struct firmware *mc_fw;	/* NI MC firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	const struct firmware *ce_fw;	/* SI CE firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	const struct firmware *mec_fw;	/* CIK MEC firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	const struct firmware *mec2_fw;	/* KV MEC2 firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	const struct firmware *smc_fw;	/* SMC firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	const struct firmware *uvd_fw;	/* UVD firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	const struct firmware *vce_fw;	/* VCE firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	bool new_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	struct r600_vram_scratch vram_scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	int msi_enabled; /* msi enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	struct r600_ih ih; /* r6/700 interrupt ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	struct radeon_rlc rlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	struct radeon_mec mec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	struct delayed_work hotplug_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	struct work_struct dp_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	struct work_struct audio_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	int num_crtc; /* number of crtcs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	bool has_uvd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	bool has_vce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	struct r600_audio audio; /* audio stuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	struct notifier_block acpi_nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	/* only one userspace can use Hyperz features or CMASK at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	struct drm_file *hyperz_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	struct drm_file *cmask_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	/* i2c buses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	/* debugfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	unsigned 		debugfs_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	/* virtual memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	struct radeon_vm_manager	vm_manager;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	struct mutex			gpu_clock_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	/* memory stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	atomic64_t			vram_usage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	atomic64_t			gtt_usage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	atomic64_t			num_bytes_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	atomic_t			gpu_reset_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	/* ACPI interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	struct radeon_atif		atif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	struct radeon_atcs		atcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	/* srbm instance registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	struct mutex			srbm_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	/* clock, powergating flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	u32 cg_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	u32 pg_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	struct dev_pm_domain vga_pm_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	bool have_disp_power_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	u32 px_quirk_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	/* tracking pinned memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	u64 vram_pin_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	u64 gart_pin_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) bool radeon_is_px(struct drm_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) int radeon_device_init(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		       struct drm_device *ddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		       struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		       uint32_t flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) void radeon_device_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
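/*
 * Lifecycle sketch (illustrative): the KMS load path hands the freshly
 * allocated device to radeon_device_init() together with the DRM/PCI handles
 * and driver flags, and pairs it with radeon_device_fini() on unload.
 *
 *	r = radeon_device_init(rdev, ddev, pdev, flags);
 *	if (r)
 *		goto out;
 *	...
 *	radeon_device_fini(rdev);
 */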
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) #define RADEON_MIN_MMIO_SIZE 0x10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 				    bool always_indirect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		return readl(((void __iomem *)rdev->rmmio) + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		return r100_mm_rreg_slow(rdev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 				bool always_indirect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		writel(v, ((void __iomem *)rdev->rmmio) + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		r100_mm_wreg_slow(rdev, reg, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
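/*
 * Usage sketch (illustrative; the 0x1234 offset is a placeholder): registers
 * inside the mapped MMIO window go straight through readl()/writel(), while
 * anything out of range, or a call with always_indirect set, takes the
 * *_slow() MM_INDEX/MM_DATA path.
 *
 *	uint32_t v;
 *
 *	v = r100_mm_rreg(rdev, 0x1234, false);	// direct readl() if in range
 *	r100_mm_wreg(rdev, 0x1234, v | 1, false);
 *	v = r100_mm_rreg(rdev, 0x1234, true);	// force the indirect path
 */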
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)  * Cast helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) extern const struct dma_fence_ops radeon_fence_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (__f->base.ops == &radeon_fence_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		return __f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) }
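/*
 * Use sketch (illustrative): because the helper validates f->ops, it can be
 * applied to any dma_fence and returns NULL for fences that were not emitted
 * by this driver.
 *
 *	struct radeon_fence *rfence = to_radeon_fence(f);
 *
 *	if (!rfence)
 *		return;		// foreign fence, nothing radeon-specific to do
 */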
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)  * Registers read & write functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) #define RREG8(reg) readb((rdev->rmmio) + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) #define RREG16(reg) readw((rdev->rmmio) + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) #define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) #define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 			    r100_mm_rreg(rdev, (reg), false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) #define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) #define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
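/*
 * Field helper sketch (NUM_BANKS and its shift/mask are hypothetical): given
 * a FIELD_SHIFT/FIELD_MASK pair, REG_SET() packs a value into its bit-field
 * and REG_GET() extracts it again.
 *
 *	#define NUM_BANKS_SHIFT	2
 *	#define NUM_BANKS_MASK	0x0000000c
 *
 *	reg = REG_SET(NUM_BANKS, 3);		// (3 << 2) & 0xc == 0xc
 *	banks = REG_GET(NUM_BANKS, reg);	// (0xc & 0xc) >> 2 == 3
 */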
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) #define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) #define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) #define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) #define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) #define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) #define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) #define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) #define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) #define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) #define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) #define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) #define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) #define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) #define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) #define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) #define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) #define WREG32_P(reg, val, mask)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	do {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		uint32_t tmp_ = RREG32(reg);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		tmp_ &= (mask);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		tmp_ |= ((val) & ~(mask));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		WREG32(reg, tmp_);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
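/*
 * Read-modify-write sketch (GB_TILING_CONFIG stands in for any register):
 * in WREG32_P() the mask selects the bits to *keep* from the current value
 * and (val & ~mask) supplies the new bits; WREG32_AND() therefore clears the
 * bits outside its mask and WREG32_OR() sets bits while preserving the rest.
 *
 *	// keep everything except bits 1:0, then program them to 0x2
 *	WREG32_P(GB_TILING_CONFIG, 0x2, ~0x3);
 *	WREG32_OR(GB_TILING_CONFIG, BIT(8));	// set bit 8, keep the rest
 */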
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) #define WREG32_PLL_P(reg, val, mask)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	do {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		uint32_t tmp_ = RREG32_PLL(reg);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		tmp_ &= (mask);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		tmp_ |= ((val) & ~(mask));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 		WREG32_PLL(reg, tmp_);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) #define WREG32_SMC_P(reg, val, mask)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	do {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		uint32_t tmp_ = RREG32_SMC(reg);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		tmp_ &= (mask);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		tmp_ |= ((val) & ~(mask));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		WREG32_SMC(reg, tmp_);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) #define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) #define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) #define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)  * Indirect registers accessors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)  * They used to be inlined, but this increases code size by ~65 kbytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)  * Since each performs a pair of MMIO ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)  * within a spin_lock_irqsave/spin_unlock_irqrestore region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)  * the cost of call+ret is almost negligible. MMIO and locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)  * costs several dozens of cycles each at best, call+ret is ~5 cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)  */
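/*
 * Shape of these accessors (a sketch; EXAMPLE_INDEX/EXAMPLE_DATA stand in
 * for the per-block index/data register pair each accessor really uses):
 *
 *	u32 example_indirect_rreg(struct radeon_device *rdev, u32 reg)
 *	{
 *		unsigned long flags;
 *		u32 r;
 *
 *		spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
 *		WREG32(EXAMPLE_INDEX, reg);	// select the register ...
 *		r = RREG32(EXAMPLE_DATA);	// ... then read it back
 *		spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 *		return r;
 *	}
 */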
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) void r100_pll_errata_after_index(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)  * ASICs helpers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 			    (rdev->pdev->device == 0x5969))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		(rdev->family == CHIP_RV200) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		(rdev->family == CHIP_RS100) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		(rdev->family == CHIP_RS200) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		(rdev->family == CHIP_RV250) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		(rdev->family == CHIP_RV280) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		(rdev->family == CHIP_RS300))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) #define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		(rdev->family == CHIP_RV350) ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		(rdev->family == CHIP_R350)  ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		(rdev->family == CHIP_RV380) ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		(rdev->family == CHIP_R420)  ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		(rdev->family == CHIP_R423)  ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		(rdev->family == CHIP_RV410) ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		(rdev->family == CHIP_RS400) ||			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		(rdev->family == CHIP_RS480))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) #define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		(rdev->ddev->pdev->device == 0x9443) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 		(rdev->ddev->pdev->device == 0x944B) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		(rdev->ddev->pdev->device == 0x9506) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		(rdev->ddev->pdev->device == 0x9509) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		(rdev->ddev->pdev->device == 0x950F) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		(rdev->ddev->pdev->device == 0x689C) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		(rdev->ddev->pdev->device == 0x689D))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 			    (rdev->family == CHIP_RS690)  ||	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			    (rdev->family == CHIP_RS740)  ||	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 			    (rdev->family >= CHIP_R600))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 			     (rdev->flags & RADEON_IS_IGP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) #define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 			     (rdev->flags & RADEON_IS_IGP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) #define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 			     (rdev->family == CHIP_MULLINS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 			      (rdev->ddev->pdev->device == 0x6850) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 			      (rdev->ddev->pdev->device == 0x6858) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 			      (rdev->ddev->pdev->device == 0x6859) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 			      (rdev->ddev->pdev->device == 0x6840) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 			      (rdev->ddev->pdev->device == 0x6841) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 			      (rdev->ddev->pdev->device == 0x6842) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 			      (rdev->ddev->pdev->device == 0x6843))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)  * BIOS helpers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) #define RBIOS8(i) (rdev->bios[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
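/*
 * Worked example (illustrative offset i): the RBIOS* helpers assemble
 * little-endian values one byte at a time, so they are safe for the
 * unaligned reads a video BIOS image requires.
 *
 *	// bios[i..i+3] == { 0x78, 0x56, 0x34, 0x12 }
 *	RBIOS8(i)  == 0x78
 *	RBIOS16(i) == 0x5678
 *	RBIOS32(i) == 0x12345678
 */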
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) int radeon_combios_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) void radeon_combios_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) int radeon_atombios_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) void radeon_atombios_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)  * RING helpers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)  * radeon_ring_write - write a value to the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)  * @ring: radeon_ring structure holding ring information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)  * @v: dword (dw) value to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)  * Write a value to the requested ring buffer (all asics).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	if (ring->count_dw <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	ring->ring[ring->wptr++] = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	ring->wptr &= ring->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	ring->count_dw--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	ring->ring_free_dw--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
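/*
 * Typical call pattern (a sketch, assuming the radeon_ring_lock()/
 * radeon_ring_unlock_commit() helpers and the PACKET0() macro defined
 * elsewhere in the driver): space is reserved up front, so the count_dw
 * check above only fires when a caller writes more dwords than it reserved.
 *
 *	r = radeon_ring_lock(rdev, ring, 2);	// reserve 2 dwords
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, val);
 *	radeon_ring_unlock_commit(rdev, ring, false);
 */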
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)  * ASICs macro.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) #define radeon_init(rdev) (rdev)->asic->init((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) #define radeon_fini(rdev) (rdev)->asic->fini((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev), false)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) #define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) #define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) #define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) #define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) #define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) #define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) #define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) #define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) #define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) #define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) #define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) #define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) #define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) #define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) #define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) #define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) #define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) #define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) #define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) #define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) #define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) #define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) #define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) #define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) #define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) #define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) #define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) #define radeon_page_flip(rdev, crtc, base, async) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) #define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) #define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) #define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) #define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) #define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) #define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) #define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) #define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) #define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) #define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) #define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) #define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) #define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) #define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
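/*
 * Dispatch sketch (illustrative): each macro above expands to a call through
 * the per-ASIC function table that radeon_asic_init() installs, which keeps
 * the common code generation-agnostic. For example:
 *
 *	// radeon_asic_reset(rdev) expands to
 *	(rdev)->asic->asic_reset((rdev), false);
 */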
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* Common functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) /* AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) extern int radeon_gpu_reset(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) extern void radeon_pci_config_reset(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) extern void radeon_agp_disable(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) extern int radeon_modeset_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) extern void radeon_modeset_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) extern bool radeon_card_posted(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) extern void radeon_update_display_priority(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) extern void radeon_scratch_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) extern void radeon_wb_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) extern int radeon_wb_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) extern void radeon_wb_disable(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) extern void radeon_surface_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 				     struct ttm_tt *ttm, uint64_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 				     uint32_t flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) extern int radeon_suspend_kms(struct drm_device *dev, bool suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 			      bool fbcon, bool freeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) extern void radeon_program_register_sequence(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 					     const u32 *registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 					     const u32 array_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);

/*
 * vm
 */
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring, struct radeon_fence *fence);
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm);
int radeon_vm_clear_invalids(struct radeon_device *rdev,
			     struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_resource *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo);
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t offset,
			  uint32_t flags);
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va);
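
/*
 * Illustrative flow (a hypothetical sketch, not code from this tree):
 * mapping a BO into a VM typically chains the helpers above, roughly:
 *
 *	struct radeon_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = radeon_vm_bo_add(rdev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *	r = radeon_vm_bo_set_addr(rdev, bo_va, gpu_addr,
 *				  RADEON_VM_PAGE_READABLE |
 *				  RADEON_VM_PAGE_WRITEABLE);
 *	if (r)
 *		return r;
 *	r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 *
 * and radeon_vm_bo_rmv() tears the mapping down again; gpu_addr and the
 * flag choice here are examples only.
 */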

/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
void r600_audio_enable(struct radeon_device *rdev,
		       struct r600_audio_pin *pin,
		       u8 enable_mask);
void dce6_audio_enable(struct radeon_device *rdev,
		       struct r600_audio_pin *pin,
		       u8 enable_mask);
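
/*
 * Illustrative usage (hypothetical; assumes a nonzero enable_mask turns
 * the pin on and 0 turns it off):
 *
 *	struct r600_audio_pin *pin = dce6_audio_get_pin(rdev);
 *
 *	dce6_audio_enable(rdev, pin, 0xf);	enables the pin
 *	dce6_audio_enable(rdev, pin, 0);	disables it again
 */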

/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);

/*
 * r600 cs checking helpers
 */
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);
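
/*
 * Illustrative sketch (not code from this tree): the helpers above
 * combine to give the byte size of one slice of a given mip level,
 * roughly (pitch alignment ignored):
 *
 *	u32 w = r600_mip_minify(width, level);
 *	u32 h = r600_mip_minify(height, level);
 *	u32 size = r600_fmt_get_nblocksx(format, w) *
 *		   r600_fmt_get_nblocksy(format, h) *
 *		   r600_fmt_get_blocksize(format);
 *
 * where r600_mip_minify() is conceptually max(1, size >> level), and
 * the nblocks helpers account for block-compressed formats.
 */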

/*
 * r600 functions used by radeon_encoder.c
 */
struct radeon_hdmi_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
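
/*
 * Background: the N/CTS pairs above drive HDMI audio clock
 * regeneration, where the sink reconstructs the audio sample rate f_s
 * from the TMDS (pixel) clock via
 *
 *	128 * f_s = f_TMDS * N / CTS
 *
 * For example, with the spec-recommended N = 6144 at 48 kHz and a
 * 74.25 MHz pixel clock:
 *
 *	CTS = 74250000 * 6144 / (128 * 48000) = 74250
 *
 * r600_hdmi_acr() returns the N/CTS values for a given pixel clock.
 */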

extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
				     u32 tiling_pipe_num,
				     u32 max_rb_num,
				     u32 total_max_rb_num,
				     u32 enabled_rb_mask);

/*
 * evergreen functions used by radeon_encoder.c
 */

extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
						u8 perf_req, bool advertise);
extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
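
/*
 * Note: the !CONFIG_ACPI stubs above let callers invoke
 * radeon_acpi_init() and radeon_acpi_fini() unconditionally instead of
 * wrapping every call site in #ifdef CONFIG_ACPI; returning 0 makes the
 * no-op init look like a success.
 */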

int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx);
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
			       uint32_t *vline_start_end,
			       uint32_t *vline_status);

/* interrupt control register helpers */
void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
				      u32 reg, u32 mask,
				      bool enable, const char *name,
				      unsigned n);
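
/*
 * Illustrative call (the register and mask names are assumptions taken
 * from the display IRQ code, not definitions in this header):
 *
 *	radeon_irq_kms_set_irq_n_enabled(rdev, INT_MASK + crtc_offsets[i],
 *					 VBLANK_INT_MASK,
 *					 rdev->irq.crtc_vblank_int[i],
 *					 "vblank", i);
 *
 * The helper read-modify-writes mask in reg according to enable and
 * logs the transition as "<name><n> interrupts enabled/disabled".
 */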

#include "radeon_object.h"

#endif /* __RADEON_H__ */