Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * Copyright 2010 Advanced Micro Devices, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Permission is hereby granted, free of charge, to any person obtaining a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * copy of this software and associated documentation files (the "Software"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * to deal in the Software without restriction, including without limitation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * and/or sell copies of the Software, and to permit persons to whom the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Software is furnished to do so, subject to the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * The above copyright notice and this permission notice shall be included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * all copies or substantial portions of the Software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * OTHER DEALINGS IN THE SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * Authors: Alex Deucher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <drm/drm_vblank.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <drm/radeon_drm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "atom.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include "avivod.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include "evergreen_blit_shaders.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include "evergreen_reg.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "evergreend.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include "radeon.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include "radeon_asic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include "radeon_audio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include "radeon_ucode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define DC_HPDx_CONTROL(x)        (DC_HPD1_CONTROL     + (x * 0xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define DC_HPDx_INT_CONTROL(x)    (DC_HPD1_INT_CONTROL + (x * 0xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS  + (x * 0xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
/*
 * Indirect register accessors
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	u32 r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	r = RREG32(EVERGREEN_CG_IND_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	WREG32(EVERGREEN_CG_IND_DATA, (v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	u32 r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	u32 r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
/*
 * Per-CRTC MMIO register block offsets, indexed by CRTC id (0-5).
 * Adding crtc_offsets[i] to a CRTC0-relative register address yields
 * the corresponding register of display controller i.
 */
static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) #include "clearstate_evergreen.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
/*
 * Register offsets handed to the RLC for save/restore on Sumo-class
 * parts (per the identifier; exact consumer is outside this chunk —
 * NOTE(review): confirm against the RLC init code that uses this list).
 * Flat list of dword register offsets.
 */
static const u32 sumo_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x9830,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c08,
	0x8c0c,
	0x8d8c,
	0x8c20,
	0x8c24,
	0x8c28,
	0x8c18,
	0x8c1c,
	0x8cf0,
	0x8e2c,
	0x8e38,
	0x8c30,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x88d4,
	0xa008,
	0x900c,
	0x9100,
	0x913c,
	0x98f8,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x9148,
	0x914c,
	0x3f90,
	0x3f94,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x9150,
	0x802c,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) static void evergreen_gpu_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) void evergreen_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) void evergreen_program_aspm(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 				     int ring, u32 cp_int_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) extern void cayman_vm_decode_fault(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 				   u32 status, u32 addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) void cik_init_cp_pg_table(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) extern u32 si_get_csb_size(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) extern u32 cik_get_csb_size(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 
/*
 * "Golden" register settings for Evergreen.
 * Entries appear to be {offset, and_mask, or_value} triples applied at
 * init — NOTE(review): the applying function is outside this chunk;
 * confirm against radeon_program_register_sequence().
 */
static const u32 evergreen_golden_registers[] =
{
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b10, 0xffffffff, 0x00000000,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0xffffffff, 0x001000f0,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x10830, 0xffffffff, 0x00000011,
	0x11430, 0xffffffff, 0x00000011,
	0x12030, 0xffffffff, 0x00000011,
	0x12c30, 0xffffffff, 0x00000011,
	0xd02c, 0xffffffff, 0x08421000,
	0x240c, 0xffffffff, 0x00000380,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x28a4c, 0x06000000, 0x06000000,
	0x10c, 0x00000001, 0x00000001,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8cf0, 0xffffffff, 0x08e00620,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x28350, 0xffffffff, 0x00000000,
	0xa008, 0xffffffff, 0x00010000,
	0x5c4, 0xffffffff, 0x00000001,
	0x9508, 0xffffffff, 0x00000002,
	0x913c, 0x0000000f, 0x0000000a
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 
/*
 * Second batch of Evergreen "golden" register settings; same triple
 * layout as evergreen_golden_registers (offset, mask, value — all
 * values zeroed here).
 */
static const u32 evergreen_golden_registers2[] =
{
	0x2f4c, 0xffffffff, 0x00000000,
	0x54f4, 0xffffffff, 0x00000000,
	0x54f0, 0xffffffff, 0x00000000,
	0x5498, 0xffffffff, 0x00000000,
	0x549c, 0xffffffff, 0x00000000,
	0x5494, 0xffffffff, 0x00000000,
	0x53cc, 0xffffffff, 0x00000000,
	0x53c8, 0xffffffff, 0x00000000,
	0x53c4, 0xffffffff, 0x00000000,
	0x53c0, 0xffffffff, 0x00000000,
	0x53bc, 0xffffffff, 0x00000000,
	0x53b8, 0xffffffff, 0x00000000,
	0x53b4, 0xffffffff, 0x00000000,
	0x53b0, 0xffffffff, 0x00000000
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
/*
 * Medium-grain clock gating (MGCG, per the identifier) init sequence
 * for Cypress.  Triples of {offset, mask, value}; note the repeated
 * writes to 0x802c interleaved in the sequence, which look like mode
 * selects between the sub-sequences — NOTE(review): semantics of the
 * 0x802c values (0xc0000000/0x40000000/0x40010000) not derivable from
 * this chunk; confirm against the register spec.
 */
static const u32 cypress_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0x40010000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) static const u32 redwood_mgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	0x802c, 0xffffffff, 0xc0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	0x5448, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	0x55e4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	0x5644, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	0x8d58, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	0x9654, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	0x9040, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	0xa200, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	0xa204, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	0xa208, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	0xa20c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	0x971c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	0x977c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	0x3f80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	0xa210, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	0xa214, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	0x4d8, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	0x9784, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	0x9698, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	0x4d4, 0xffffffff, 0x00000200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	0x30cc, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	0xd0c0, 0xffffffff, 0xff000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	0x802c, 0xffffffff, 0x40000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	0x915c, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	0x9160, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	0x9178, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	0x917c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	0x9180, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	0x918c, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	0x9190, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	0x9194, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	0x9198, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	0x919c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	0x91a8, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	0x91ac, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	0x91b0, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	0x91b4, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	0x91b8, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	0x91c4, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	0x91c8, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	0x91cc, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	0x91d0, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	0x91d4, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	0x91e0, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	0x91e4, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	0x91e8, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	0x91ec, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	0x91f0, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	0x91f4, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	0x9200, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	0x9204, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	0x9294, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	0x929c, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	0x802c, 0xffffffff, 0xc0000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) static const u32 cedar_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	0x3f90, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	0x9148, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	0x3f94, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	0x914c, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	0x9b7c, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	0x8a14, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	0x8b10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	0x960c, 0xffffffff, 0x54763210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	0x88c4, 0xffffffff, 0x000000c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	0x88d4, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	0x8974, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	0xc78, 0x00000080, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	0x5eb4, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	0x5e78, 0xffffffff, 0x001000f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	0x6104, 0x01000300, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	0x5bc0, 0x00300000, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	0x7030, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	0x7c30, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	0x10830, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	0x11430, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	0xd02c, 0xffffffff, 0x08421000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	0x240c, 0xffffffff, 0x00000380,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	0x8b24, 0xffffffff, 0x00ff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	0x28a4c, 0x06000000, 0x06000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	0x10c, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	0x8d00, 0xffffffff, 0x100e4848,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	0x8d04, 0xffffffff, 0x00164745,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	0x8c00, 0xffffffff, 0xe4000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	0x8c04, 0xffffffff, 0x40600060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	0x8c08, 0xffffffff, 0x001c001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	0x8cf0, 0xffffffff, 0x08e00410,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	0x8c20, 0xffffffff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	0x8c24, 0xffffffff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	0x8c18, 0xffffffff, 0x20202078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	0x8c1c, 0xffffffff, 0x00001010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	0x28350, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	0xa008, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	0x5c4, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	0x9508, 0xffffffff, 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static const u32 cedar_mgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	0x802c, 0xffffffff, 0xc0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	0x5448, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	0x55e4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	0x5644, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	0x8d58, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	0x9654, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	0x9040, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	0xa200, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	0xa204, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	0xa208, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	0xa20c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	0x971c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	0x977c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	0x3f80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	0xa210, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	0xa214, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	0x4d8, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	0x9784, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	0x9698, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	0x4d4, 0xffffffff, 0x00000200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	0x30cc, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	0xd0c0, 0xffffffff, 0xff000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	0x802c, 0xffffffff, 0x40000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	0x915c, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	0x9178, 0xffffffff, 0x00050000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	0x917c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	0x918c, 0xffffffff, 0x00010004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	0x9190, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	0x9194, 0xffffffff, 0x00050000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	0x9198, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	0x91a8, 0xffffffff, 0x00010004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	0x91ac, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	0x91e8, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	0x9294, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	0x929c, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	0x802c, 0xffffffff, 0xc0000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) static const u32 juniper_mgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	0x802c, 0xffffffff, 0xc0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	0x5448, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	0x55e4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	0x5644, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	0x8d58, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	0x9654, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	0x9040, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	0xa200, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	0xa204, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	0xa208, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	0xa20c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	0x971c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	0xd0c0, 0xffffffff, 0xff000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	0x802c, 0xffffffff, 0x40000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	0x915c, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	0x9160, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	0x9178, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	0x917c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	0x9180, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	0x918c, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	0x9190, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	0x9194, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	0x9198, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	0x919c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	0x91a8, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	0x91ac, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	0x91b0, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	0x91b4, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	0x91b8, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	0x91c4, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	0x91c8, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	0x91cc, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	0x91d0, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	0x91d4, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	0x91e0, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	0x91e4, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	0x91e8, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	0x91ec, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	0x91f0, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	0x91f4, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	0x9200, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	0x9204, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	0x9208, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	0x920c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	0x9210, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	0x921c, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	0x9220, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	0x9224, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	0x9228, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	0x922c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	0x9238, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	0x923c, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	0x9240, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	0x9244, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	0x9248, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	0x9254, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	0x9258, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	0x925c, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	0x9260, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	0x9264, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	0x9270, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	0x9274, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	0x9278, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	0x927c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	0x9280, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	0x928c, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	0x9290, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	0x9294, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	0x929c, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	0x802c, 0xffffffff, 0xc0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	0x977c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	0x3f80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	0xa210, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	0xa214, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	0x4d8, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	0x9784, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	0x9698, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	0x4d4, 0xffffffff, 0x00000200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	0x30cc, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	0x802c, 0xffffffff, 0xc0000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static const u32 supersumo_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	0x5eb4, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	0x5c4, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	0x7030, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	0x7c30, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	0x6104, 0x01000300, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	0x5bc0, 0x00300000, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	0x8c04, 0xffffffff, 0x40600060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	0x8c08, 0xffffffff, 0x001c001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	0x8c20, 0xffffffff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	0x8c24, 0xffffffff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	0x8c18, 0xffffffff, 0x20202078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	0x8c1c, 0xffffffff, 0x00001010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	0x918c, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	0x91a8, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	0x91c4, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	0x91e0, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	0x9200, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	0x9150, 0xffffffff, 0x6e944040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	0x917c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	0x9180, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	0x9198, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	0x919c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	0x91b4, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	0x91b8, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	0x91d0, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	0x91d4, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	0x91f0, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	0x91f4, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	0x915c, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	0x9160, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	0x3f90, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	0x9178, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	0x9194, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	0x91b0, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	0x91cc, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	0x91ec, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	0x9148, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	0x9190, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	0x91ac, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	0x91c8, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	0x91e4, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	0x9204, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	0x3f94, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	0x914c, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	0x929c, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	0x5644, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	0x9b7c, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	0x8030, 0xffffffff, 0x0000100a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	0x8a14, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	0x8b24, 0xffffffff, 0x00ff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	0x8b10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	0x28a4c, 0x06000000, 0x06000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	0x4d8, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	0x913c, 0xffff000f, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	0x960c, 0xffffffff, 0x54763210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	0x88c4, 0xffffffff, 0x000000c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	0x88d4, 0xffffffff, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	0x8974, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	0xc78, 0x00000080, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	0x5e78, 0xffffffff, 0x001000f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	0xd02c, 0xffffffff, 0x08421000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	0xa008, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	0x8d00, 0xffffffff, 0x100e4848,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	0x8d04, 0xffffffff, 0x00164745,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	0x8c00, 0xffffffff, 0xe4000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	0x8cf0, 0x1fffffff, 0x08e00620,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	0x28350, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	0x9508, 0xffffffff, 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static const u32 sumo_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	0x900c, 0x00ffffff, 0x0017071f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	0x8c18, 0xffffffff, 0x10101060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	0x8c1c, 0xffffffff, 0x00001010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	0x8c30, 0x0000000f, 0x00000005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	0x9688, 0x0000000f, 0x00000007
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) static const u32 wrestler_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	0x5eb4, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	0x5c4, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	0x7030, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	0x7c30, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	0x6104, 0x01000300, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	0x5bc0, 0x00300000, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	0x918c, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	0x91a8, 0xffffffff, 0x00010006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	0x9150, 0xffffffff, 0x6e944040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	0x917c, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	0x9198, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	0x915c, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	0x3f90, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	0x9178, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	0x9194, 0xffffffff, 0x00070000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	0x9148, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	0x9190, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	0x91ac, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	0x3f94, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	0x914c, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	0x929c, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	0x9b7c, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	0x8030, 0xffffffff, 0x0000100a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	0x8a14, 0xffffffff, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	0x8b24, 0xffffffff, 0x00ff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	0x8b10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	0x28a4c, 0x06000000, 0x06000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	0x4d8, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	0x913c, 0xffff000f, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	0x960c, 0xffffffff, 0x54763210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	0x88c4, 0xffffffff, 0x000000c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	0x88d4, 0xffffffff, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	0x8974, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	0xc78, 0x00000080, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	0x5e78, 0xffffffff, 0x001000f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	0xd02c, 0xffffffff, 0x08421000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	0xa008, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	0x8d00, 0xffffffff, 0x100e4848,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	0x8d04, 0xffffffff, 0x00164745,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	0x8c00, 0xffffffff, 0xe4000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	0x8cf0, 0x1fffffff, 0x08e00410,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	0x28350, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	0x9508, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	0x900c, 0xffffffff, 0x0017071f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	0x8c18, 0xffffffff, 0x10101060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	0x8c1c, 0xffffffff, 0x00001010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) static const u32 barts_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	0x5eb4, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	0x5e78, 0x8f311ff1, 0x001000f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	0x3f90, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	0x9148, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	0x3f94, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	0x914c, 0xffff0000, 0xff000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	0xc78, 0x00000080, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	0xbd4, 0x70073777, 0x00010001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	0xd02c, 0xbfffff1f, 0x08421000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	0xd0b8, 0x03773777, 0x02011003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	0x98f8, 0x33773777, 0x02011003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	0x98fc, 0xffffffff, 0x76543210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	0x2f48, 0x00000007, 0x02011003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	0x6b28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	0x7728, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	0x10328, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	0x10f28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	0x11b28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	0x12728, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	0x240c, 0x000007ff, 0x00000380,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	0x8b24, 0x3fff3fff, 0x00ff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	0x28a4c, 0x07ffffff, 0x06000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	0x10c, 0x00000001, 0x00010003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	0xa02c, 0xffffffff, 0x0000009b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	0x913c, 0x0000000f, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	0x8d00, 0xffff7f7f, 0x100e4848,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	0x8d04, 0x00ffffff, 0x00164745,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	0x8c00, 0xfffc0003, 0xe4000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	0x8c04, 0xf8ff00ff, 0x40600060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	0x8c08, 0x00ff00ff, 0x001c001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	0x8cf0, 0x1fff1fff, 0x08e00620,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	0x8c20, 0x0fff0fff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	0x8c24, 0x0fff0fff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	0x8c18, 0xffffffff, 0x20202078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	0x8c1c, 0x0000ffff, 0x00001010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	0x28350, 0x00000f01, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	0x9508, 0x3700001f, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	0x960c, 0xffffffff, 0x54763210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	0x88c4, 0x001f3ae3, 0x000000c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	0x8974, 0xffffffff, 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) static const u32 turks_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	0x5eb4, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	0x5e78, 0x8f311ff1, 0x001000f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	0x8c8, 0x00003000, 0x00001070,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	0x8cc, 0x000fffff, 0x00040035,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	0x3f90, 0xffff0000, 0xfff00000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	0x9148, 0xffff0000, 0xfff00000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	0x3f94, 0xffff0000, 0xfff00000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	0x914c, 0xffff0000, 0xfff00000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	0xc78, 0x00000080, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	0xbd4, 0x00073007, 0x00010002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	0xd02c, 0xbfffff1f, 0x08421000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	0xd0b8, 0x03773777, 0x02010002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	0x98f8, 0x33773777, 0x00010002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	0x98fc, 0xffffffff, 0x33221100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	0x2f48, 0x33773777, 0x00010002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	0x6b28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	0x7728, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	0x10328, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	0x10f28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	0x11b28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	0x12728, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	0x240c, 0x000007ff, 0x00000380,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	0x8b24, 0x3fff3fff, 0x00ff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	0x28a4c, 0x07ffffff, 0x06000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	0x10c, 0x00000001, 0x00010003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	0xa02c, 0xffffffff, 0x0000009b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	0x913c, 0x0000000f, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	0x8d00, 0xffff7f7f, 0x100e4848,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	0x8d04, 0x00ffffff, 0x00164745,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	0x8c00, 0xfffc0003, 0xe4000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	0x8c04, 0xf8ff00ff, 0x40600060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	0x8c08, 0x00ff00ff, 0x001c001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	0x8cf0, 0x1fff1fff, 0x08e00410,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	0x8c20, 0x0fff0fff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	0x8c24, 0x0fff0fff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	0x8c18, 0xffffffff, 0x20202078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	0x8c1c, 0x0000ffff, 0x00001010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	0x28350, 0x00000f01, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	0x9508, 0x3700001f, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	0x960c, 0xffffffff, 0x54763210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	0x88c4, 0x001f3ae3, 0x000000c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	0x8974, 0xffffffff, 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static const u32 caicos_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	0x5eb4, 0xffffffff, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	0x5e78, 0x8f311ff1, 0x001000f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	0x8c8, 0x00003420, 0x00001450,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	0x8cc, 0x000fffff, 0x00040035,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	0x3f90, 0xffff0000, 0xfffc0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	0x9148, 0xffff0000, 0xfffc0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	0x3f94, 0xffff0000, 0xfffc0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	0x914c, 0xffff0000, 0xfffc0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	0xc78, 0x00000080, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	0xbd4, 0x00073007, 0x00010001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	0xd02c, 0xbfffff1f, 0x08421000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	0xd0b8, 0x03773777, 0x02010001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	0x98f8, 0x33773777, 0x02010001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	0x98fc, 0xffffffff, 0x33221100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	0x2f48, 0x33773777, 0x02010001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	0x6b28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	0x7728, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	0x10328, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	0x10f28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	0x11b28, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	0x12728, 0x00000010, 0x00000012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	0x240c, 0x000007ff, 0x00000380,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	0x8a14, 0xf000001f, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	0x8b24, 0x3fff3fff, 0x00ff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	0x28a4c, 0x07ffffff, 0x06000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	0x10c, 0x00000001, 0x00010003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	0xa02c, 0xffffffff, 0x0000009b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	0x913c, 0x0000000f, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	0x8d00, 0xffff7f7f, 0x100e4848,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	0x8d04, 0x00ffffff, 0x00164745,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	0x8c00, 0xfffc0003, 0xe4000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	0x8c04, 0xf8ff00ff, 0x40600060,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	0x8c08, 0x00ff00ff, 0x001c001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	0x8cf0, 0x1fff1fff, 0x08e00410,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	0x8c20, 0x0fff0fff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	0x8c24, 0x0fff0fff, 0x00800080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	0x8c18, 0xffffffff, 0x20202078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	0x8c1c, 0x0000ffff, 0x00001010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	0x28350, 0x00000f01, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	0x9508, 0x3700001f, 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	0x960c, 0xffffffff, 0x54763210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	0x88c4, 0x001f3ae3, 0x000000c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	0x8974, 0xffffffff, 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void evergreen_init_golden_registers(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	case CHIP_CYPRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	case CHIP_HEMLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 						 evergreen_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 						 evergreen_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 						 cypress_mgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 						 (const u32)ARRAY_SIZE(cypress_mgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	case CHIP_JUNIPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 						 evergreen_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 						 evergreen_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 						 juniper_mgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 						 (const u32)ARRAY_SIZE(juniper_mgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	case CHIP_REDWOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 						 evergreen_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 						 evergreen_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 						 redwood_mgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 						 (const u32)ARRAY_SIZE(redwood_mgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 						 cedar_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 						 (const u32)ARRAY_SIZE(cedar_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 						 evergreen_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 						 cedar_mgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 						 (const u32)ARRAY_SIZE(cedar_mgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 						 wrestler_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 						 (const u32)ARRAY_SIZE(wrestler_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 						 supersumo_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 						 supersumo_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 						 sumo_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 						 (const u32)ARRAY_SIZE(sumo_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	case CHIP_BARTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 						 barts_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 						 (const u32)ARRAY_SIZE(barts_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	case CHIP_TURKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 						 turks_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 						 (const u32)ARRAY_SIZE(turks_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	case CHIP_CAICOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 						 caicos_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 						 (const u32)ARRAY_SIZE(caicos_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  * evergreen_get_allowed_info_register - fetch the register for the info ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  * @reg: register offset in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * @val: register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * Returns 0 for success or -EINVAL for an invalid register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int evergreen_get_allowed_info_register(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 					u32 reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	case GRBM_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	case GRBM_STATUS_SE0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	case GRBM_STATUS_SE1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	case SRBM_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	case SRBM_STATUS2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	case DMA_STATUS_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	case UVD_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		*val = RREG32(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			     unsigned *bankh, unsigned *mtaspect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			     unsigned *tile_split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	switch (*bankw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	switch (*bankh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	switch (*mtaspect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			      u32 cntl_reg, u32 status_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	int r, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	struct atom_clock_dividers dividers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 					   clock, false, &dividers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		if (RREG32(status_reg) & DCLK_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (i == 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	u32 cg_scratch = RREG32(CG_SCRATCH1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	cg_scratch &= 0xffff0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	cg_scratch |= vclk / 100; /* Mhz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	cg_scratch &= 0x0000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	cg_scratch |= (dclk / 100) << 16; /* Mhz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	WREG32(CG_SCRATCH1, cg_scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	/* start off with something large */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	/* bypass vclk and dclk with bclk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	/* put PLL in bypass mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (!vclk || !dclk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		/* keep the Bypass mode, put PLL to sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 					  16384, 0x03FFFFFF, 0, 128, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 					  &fb_div, &vclk_div, &dclk_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	/* set VCO_MODE to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	/* toggle UPLL_SLEEP to 1 then back to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	/* deassert UPLL_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	/* assert UPLL_RESET again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	/* disable spread spectrum. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	/* set feedback divider */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	/* set ref divider to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	if (fb_div < 307200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	/* set PDIV_A and PDIV_B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	/* give the PLL some time to settle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	/* deassert PLL_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	/* switch from bypass mode to normal mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	/* switch VCLK and DCLK selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	mdelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	int readrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	u16 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	readrq = pcie_get_readrq(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	v = ffs(readrq) - 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * to avoid hangs or perfomance issues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	if ((v == 0) || (v == 6) || (v == 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		pcie_set_readrq(rdev->pdev, 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) void dce4_program_fmt(struct drm_encoder *encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	struct drm_device *dev = encoder->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	struct radeon_device *rdev = dev->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	int bpc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	u32 tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (connector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		bpc = radeon_get_monitor_bpc(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		dither = radeon_connector->dither;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	/* LVDS/eDP FMT is set up by atom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	/* not needed for analog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (bpc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	switch (bpc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		if (dither == RADEON_FMT_DITHER_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			/* XXX sort out optimal dither settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				FMT_SPATIAL_DITHER_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			tmp |= FMT_TRUNCATE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		if (dither == RADEON_FMT_DITHER_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			/* XXX sort out optimal dither settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 				FMT_RGB_RANDOM_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		/* not needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	u32 pos1, pos2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (pos1 != pos2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * dce4_wait_for_vblank - vblank wait asic callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * @crtc: crtc to wait for vblank on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * Wait for vblank on the requested crtc (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	unsigned i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (crtc >= rdev->num_crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	/* depending on when we hit vblank, we may be close to active; if so,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	 * wait for another frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	while (dce4_is_in_vblank(rdev, crtc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		if (i++ % 100 == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			if (!dce4_is_counter_moving(rdev, crtc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	while (!dce4_is_in_vblank(rdev, crtc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		if (i++ % 100 == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			if (!dce4_is_counter_moving(rdev, crtc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  * evergreen_page_flip - pageflip callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  * @crtc_id: crtc to cleanup pageflip on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  * @crtc_base: new address of the crtc (GPU MC address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  * Triggers the actual pageflip by updating the primary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  * surface base address (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			 bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	/* update the scanout addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	       async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	       upper_32_bits(crtc_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	       (u32)crtc_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	/* post the write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * evergreen_page_flip_pending - check if page flip is still pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  * @crtc_id: crtc to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * Returns the current update pending status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	/* Return current update_pending status: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* get temperature in millidegrees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) int evergreen_get_temp(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	u32 temp, toffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	int actual_temp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (rdev->family == CHIP_JUNIPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			TOFFSET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			TS0_ADC_DOUT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		if (toffset & 0x100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 			actual_temp = temp / 2 - (0x200 - toffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			actual_temp = temp / 2 + toffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		actual_temp = actual_temp * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			ASIC_T_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		if (temp & 0x400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			actual_temp = -256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		else if (temp & 0x200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			actual_temp = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		else if (temp & 0x100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			actual_temp = temp & 0x1ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			actual_temp |= ~0x1ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			actual_temp = temp & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		actual_temp = (actual_temp * 1000) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	return actual_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) int sumo_get_temp(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	int actual_temp = temp - 49;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	return actual_temp * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  * sumo_pm_init_profile - Initialize power profiles callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * Initialize the power states used in profile mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * (sumo, trinity, SI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  * Used for profile mode only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) void sumo_pm_init_profile(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	/* default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	/* low,mid sh/mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	if (rdev->flags & RADEON_IS_MOBILITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	/* high sh/mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		rdev->pm.power_state[idx].num_clock_modes - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		rdev->pm.power_state[idx].num_clock_modes - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  * btc_pm_init_profile - Initialize power profiles callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  * Initialize the power states used in profile mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  * (BTC, cayman).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  * Used for profile mode only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) void btc_pm_init_profile(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	/* default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	/* starting with BTC, there is one state that is used for both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	 * MH and SH.  Difference is that we always use the high clock index for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	 * mclk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	if (rdev->flags & RADEON_IS_MOBILITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	/* low sh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	/* mid sh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	/* high sh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	/* low mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	/* mid mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	/* high mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)  * evergreen_pm_misc - set additional pm hw parameters callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)  * Set non-clock parameters associated with a power state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)  * (voltage, etc.) (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) void evergreen_pm_misc(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	int req_ps_idx = rdev->pm.requested_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (voltage->type == VOLTAGE_SW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		/* 0xff0x are flags rather then an actual voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		if ((voltage->voltage & 0xff00) == 0xff00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			rdev->pm.current_vddc = voltage->voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		/* starting with BTC, there is one state that is used for both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		 * MH and SH.  Difference is that we always use the high clock index for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		 * mclk and vddci.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		    (rdev->family >= CHIP_BARTS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		    rdev->pm.active_crtc_count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			voltage = &rdev->pm.power_state[req_ps_idx].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		/* 0xff0x are flags rather then an actual voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		if ((voltage->vddci & 0xff00) == 0xff00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 			rdev->pm.current_vddci = voltage->vddci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)  * evergreen_pm_prepare - pre-power state change callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)  * Prepare for a power state change (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) void evergreen_pm_prepare(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	struct drm_device *ddev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct radeon_crtc *radeon_crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	/* disable any active CRTCs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		radeon_crtc = to_radeon_crtc(crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		if (radeon_crtc->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)  * evergreen_pm_finish - post-power state change callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)  * Clean up after a power state change (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) void evergreen_pm_finish(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	struct drm_device *ddev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	struct radeon_crtc *radeon_crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	/* enable any active CRTCs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		radeon_crtc = to_radeon_crtc(crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		if (radeon_crtc->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)  * evergreen_hpd_sense - hpd sense callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * @hpd: hpd (hotplug detect) pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  * Checks if a digital monitor is connected (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  * Returns true if connected, false if not connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	if (hpd == RADEON_HPD_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	return !!(RREG32(DC_HPDx_INT_STATUS_REG(hpd)) & DC_HPDx_SENSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  * evergreen_hpd_set_polarity - hpd set polarity callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)  * @hpd: hpd (hotplug detect) pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)  * Set the polarity of the hpd pin (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) void evergreen_hpd_set_polarity(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 				enum radeon_hpd_id hpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	bool connected = evergreen_hpd_sense(rdev, hpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	if (hpd == RADEON_HPD_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		WREG32_AND(DC_HPDx_INT_CONTROL(hpd), ~DC_HPDx_INT_POLARITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		WREG32_OR(DC_HPDx_INT_CONTROL(hpd), DC_HPDx_INT_POLARITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  * evergreen_hpd_init - hpd setup callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  * Setup the hpd pins used by the card (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  * Enable the pin, set the polarity, and enable the hpd interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) void evergreen_hpd_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	struct drm_device *dev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	unsigned enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		enum radeon_hpd_id hpd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			to_radeon_connector(connector)->hpd.hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			/* don't try to enable hpd on eDP or LVDS avoid breaking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			 * aux dp channel on imac and help (but not completely fix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			 * also avoid interrupt storms during dpms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		if (hpd == RADEON_HPD_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		WREG32(DC_HPDx_CONTROL(hpd), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		enabled |= 1 << hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		radeon_hpd_set_polarity(rdev, hpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	radeon_irq_kms_enable_hpd(rdev, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  * evergreen_hpd_fini - hpd tear down callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  * Tear down the hpd pins used by the card (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * Disable the hpd interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) void evergreen_hpd_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	struct drm_device *dev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	unsigned disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		enum radeon_hpd_id hpd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			to_radeon_connector(connector)->hpd.hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		if (hpd == RADEON_HPD_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		WREG32(DC_HPDx_CONTROL(hpd), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		disabled |= 1 << hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	radeon_irq_kms_disable_hpd(rdev, disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /* watermark setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 					struct radeon_crtc *radeon_crtc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 					struct drm_display_mode *mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 					struct drm_display_mode *other_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	u32 tmp, buffer_alloc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	 * Line Buffer Setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	 * There are 3 line buffers, each one shared by 2 display controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	 * the display controllers.  The paritioning is done via one of four
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	 * preset allocations specified in bits 2:0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	 * first display controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	 *  0 - first half of lb (3840 * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	 *  1 - first 3/4 of lb (5760 * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	 *  2 - whole lb (7680 * 2), other crtc must be disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	 *  3 - first 1/4 of lb (1920 * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	 * second display controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	 *  4 - second half of lb (3840 * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	 *  5 - second 3/4 of lb (5760 * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	 *  6 - whole lb (7680 * 2), other crtc must be disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	 *  7 - last 1/4 of lb (1920 * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	/* this can get tricky if we have two large displays on a paired group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	 * non-linked crtcs for maximum line buffer allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	if (radeon_crtc->base.enabled && mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		if (other_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			tmp = 0; /* 1/2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			buffer_alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			tmp = 2; /* whole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			buffer_alloc = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		buffer_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	/* second controller of the pair uses second half of the lb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	if (radeon_crtc->crtc_id % 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		tmp += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			    DMIF_BUFFERS_ALLOCATED_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (radeon_crtc->base.enabled && mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		switch (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			if (ASIC_IS_DCE5(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 				return 4096 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 				return 3840 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			if (ASIC_IS_DCE5(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 				return 6144 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 				return 5760 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 			if (ASIC_IS_DCE5(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 				return 8192 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 				return 7680 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			if (ASIC_IS_DCE5(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 				return 2048 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 				return 1920 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	/* controller not enabled, so no lb used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	u32 tmp = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		return 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct evergreen_wm_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	u32 dram_channels; /* number of dram channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	u32 yclk;          /* bandwidth per dram data pin in kHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	u32 sclk;          /* engine clock in kHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	u32 disp_clk;      /* display clock in kHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	u32 src_width;     /* viewport width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	u32 active_time;   /* active display time in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	u32 blank_time;    /* blank time in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	bool interlaced;    /* mode is interlaced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	fixed20_12 vsc;    /* vertical scale ratio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	u32 num_heads;     /* number of active crtcs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	u32 lb_size;       /* line buffer allocated to pipe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	u32 vtaps;         /* vertical scaler taps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	/* Calculate DRAM Bandwidth and the part allocated to display. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	fixed20_12 dram_efficiency; /* 0.7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	fixed20_12 yclk, dram_channels, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	yclk.full = dfixed_const(wm->yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	yclk.full = dfixed_div(yclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	dram_efficiency.full = dfixed_const(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	bandwidth.full = dfixed_mul(dram_channels, yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	/* Calculate DRAM Bandwidth and the part allocated to display. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	fixed20_12 yclk, dram_channels, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	yclk.full = dfixed_const(wm->yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	yclk.full = dfixed_div(yclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	bandwidth.full = dfixed_mul(dram_channels, yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	/* Calculate the display Data return Bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	fixed20_12 return_efficiency; /* 0.8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	fixed20_12 sclk, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	sclk.full = dfixed_const(wm->sclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	sclk.full = dfixed_div(sclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	return_efficiency.full = dfixed_const(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	return_efficiency.full = dfixed_div(return_efficiency, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	a.full = dfixed_const(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	bandwidth.full = dfixed_mul(a, sclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	/* Calculate the DMIF Request Bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	fixed20_12 disp_clk, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	disp_clk.full = dfixed_const(wm->disp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	disp_clk.full = dfixed_div(disp_clk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	disp_clk_request_efficiency.full = dfixed_const(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	a.full = dfixed_const(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	bandwidth.full = dfixed_mul(a, disp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	/* Calculate the display mode Average Bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	 * DisplayMode should contain the source and destination dimensions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	 * timing, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	fixed20_12 bpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	fixed20_12 line_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	fixed20_12 src_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	fixed20_12 bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	line_time.full = dfixed_div(line_time, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	bpp.full = dfixed_const(wm->bytes_per_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	src_width.full = dfixed_const(wm->src_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	bandwidth.full = dfixed_mul(src_width, bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	bandwidth.full = dfixed_div(bandwidth, line_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	/* First calcualte the latency in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	u32 mc_latency = 2000; /* 2000 ns. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	u32 available_bandwidth = evergreen_available_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		(wm->num_heads * cursor_line_pair_return_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	fixed20_12 a, b, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	if (wm->num_heads == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	a.full = dfixed_const(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	b.full = dfixed_const(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	if ((wm->vsc.full > a.full) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	    (wm->vtaps >= 5) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	    ((wm->vsc.full >= a.full) && wm->interlaced))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		max_src_lines_per_dst_line = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		max_src_lines_per_dst_line = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	a.full = dfixed_const(available_bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	b.full = dfixed_const(wm->num_heads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	a.full = dfixed_div(a, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	lb_fill_bw = min(dfixed_trunc(a), wm->disp_clk * wm->bytes_per_pixel / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	b.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	c.full = dfixed_const(lb_fill_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	b.full = dfixed_div(c, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	a.full = dfixed_div(a, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	line_fill_time = dfixed_trunc(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	if (line_fill_time < wm->active_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		return latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		return latency + (line_fill_time - wm->active_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	if (evergreen_average_bandwidth(wm) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	if (evergreen_average_bandwidth(wm) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	    (evergreen_available_bandwidth(wm) / wm->num_heads))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	u32 lb_partitions = wm->lb_size / wm->src_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	u32 line_time = wm->active_time + wm->blank_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	u32 latency_tolerant_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	u32 latency_hiding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	a.full = dfixed_const(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (wm->vsc.full > a.full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		latency_tolerant_lines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		if (lb_partitions <= (wm->vtaps + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 			latency_tolerant_lines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 			latency_tolerant_lines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	if (evergreen_latency_watermark(wm) <= latency_hiding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static void evergreen_program_watermarks(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 					 struct radeon_crtc *radeon_crtc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 					 u32 lb_size, u32 num_heads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	struct evergreen_wm_params wm_low, wm_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	u32 dram_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	u32 active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	u32 line_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	u32 priority_a_mark = 0, priority_b_mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	u32 priority_a_cnt = PRIORITY_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	u32 priority_b_cnt = PRIORITY_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	u32 pipe_offset = radeon_crtc->crtc_id * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	u32 tmp, arb_control3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	fixed20_12 a, b, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	if (radeon_crtc->base.enabled && num_heads && mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 					    (u32)mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 					  (u32)mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		line_time = min(line_time, (u32)65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		priority_a_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		priority_b_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		dram_channels = evergreen_get_number_of_dram_channels(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		/* watermark for high clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			wm_high.yclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				radeon_dpm_get_mclk(rdev, false) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			wm_high.sclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				radeon_dpm_get_sclk(rdev, false) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			wm_high.yclk = rdev->pm.current_mclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			wm_high.sclk = rdev->pm.current_sclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		wm_high.disp_clk = mode->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		wm_high.src_width = mode->crtc_hdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		wm_high.active_time = active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		wm_high.blank_time = line_time - wm_high.active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		wm_high.interlaced = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			wm_high.interlaced = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		wm_high.vsc = radeon_crtc->vsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		wm_high.vtaps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		if (radeon_crtc->rmx_type != RMX_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 			wm_high.vtaps = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		wm_high.lb_size = lb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		wm_high.dram_channels = dram_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		wm_high.num_heads = num_heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		/* watermark for low clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			wm_low.yclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 				radeon_dpm_get_mclk(rdev, true) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			wm_low.sclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 				radeon_dpm_get_sclk(rdev, true) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			wm_low.yclk = rdev->pm.current_mclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			wm_low.sclk = rdev->pm.current_sclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		wm_low.disp_clk = mode->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		wm_low.src_width = mode->crtc_hdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		wm_low.active_time = active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		wm_low.blank_time = line_time - wm_low.active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		wm_low.interlaced = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 			wm_low.interlaced = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		wm_low.vsc = radeon_crtc->vsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		wm_low.vtaps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		if (radeon_crtc->rmx_type != RMX_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			wm_low.vtaps = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		wm_low.lb_size = lb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		wm_low.dram_channels = dram_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		wm_low.num_heads = num_heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		/* set for high clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		/* set for low clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		/* possibly force display priority to high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		/* should really do this at mode validation time... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		    !evergreen_check_latency_hiding(&wm_high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		    (rdev->disp_priority == 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			DRM_DEBUG_KMS("force priority a to high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		    !evergreen_check_latency_hiding(&wm_low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		    (rdev->disp_priority == 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 			DRM_DEBUG_KMS("force priority b to high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		b.full = dfixed_const(mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		b.full = dfixed_div(b, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		c.full = dfixed_const(latency_watermark_a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		c.full = dfixed_mul(c, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		c.full = dfixed_mul(c, radeon_crtc->hsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		a.full = dfixed_const(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		priority_a_mark = dfixed_trunc(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		b.full = dfixed_const(mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		b.full = dfixed_div(b, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		c.full = dfixed_const(latency_watermark_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		c.full = dfixed_mul(c, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		c.full = dfixed_mul(c, radeon_crtc->hsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		a.full = dfixed_const(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		priority_b_mark = dfixed_trunc(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		/* Save number of lines the linebuffer leads before the scanout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	/* select wm A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	tmp = arb_control3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	tmp &= ~LATENCY_WATERMARK_MASK(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	tmp |= LATENCY_WATERMARK_MASK(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		LATENCY_HIGH_WATERMARK(line_time)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	/* select wm B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	tmp &= ~LATENCY_WATERMARK_MASK(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	tmp |= LATENCY_WATERMARK_MASK(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		LATENCY_HIGH_WATERMARK(line_time)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	/* restore original selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	/* write the priority marks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	/* save values for DPM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	radeon_crtc->line_time = line_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	radeon_crtc->wm_high = latency_watermark_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	radeon_crtc->wm_low = latency_watermark_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)  * evergreen_bandwidth_update - update display watermarks callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)  * Update the display watermarks based on the requested mode(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)  * (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) void evergreen_bandwidth_update(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	struct drm_display_mode *mode0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	struct drm_display_mode *mode1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	u32 num_heads = 0, lb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	if (!rdev->mode_info.mode_config_initialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	radeon_update_display_priority(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		if (rdev->mode_info.crtcs[i]->base.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			num_heads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	for (i = 0; i < rdev->num_crtc; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)  * evergreen_mc_wait_for_idle - wait for MC idle callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)  * Wait for the MC (memory controller) to be idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)  * (evergreen+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)  * Returns 0 if the MC is idle, -1 if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		/* read MC_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		tmp = RREG32(SRBM_STATUS) & 0x1F00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)  * GART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		/* read MC_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		if (tmp == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 			pr_warn("[drm] r600 flush TLB failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	if (rdev->gart.robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	r = radeon_gart_table_vram_pin(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	/* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 				EFFECTIVE_L2_QUEUE_SIZE(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	WREG32(VM_L2_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	/* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		if ((rdev->family == CHIP_JUNIPER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		    (rdev->family == CHIP_CYPRESS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		    (rdev->family == CHIP_HEMLOCK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		    (rdev->family == CHIP_BARTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			(u32)(rdev->dummy_page.addr >> 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	WREG32(VM_CONTEXT1_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	evergreen_pcie_gart_tlb_flush(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		 (unsigned)(rdev->mc.gtt_size >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		 (unsigned long long)rdev->gart.table_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	rdev->gart.ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	/* Disable all tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	WREG32(VM_CONTEXT0_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	WREG32(VM_CONTEXT1_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	/* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 				EFFECTIVE_L2_QUEUE_SIZE(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	WREG32(VM_L2_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	/* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	radeon_gart_table_vram_unpin(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	evergreen_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	radeon_gart_table_vram_free(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	radeon_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) static void evergreen_agp_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	/* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 				EFFECTIVE_L2_QUEUE_SIZE(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	WREG32(VM_L2_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	/* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	WREG32(VM_CONTEXT0_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	WREG32(VM_CONTEXT1_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static const unsigned ni_dig_offsets[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	NI_DIG0_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	NI_DIG1_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	NI_DIG2_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	NI_DIG3_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	NI_DIG4_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	NI_DIG5_REGISTER_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) static const unsigned ni_tx_offsets[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) static const unsigned evergreen_dp_offsets[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	EVERGREEN_DP0_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	EVERGREEN_DP1_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	EVERGREEN_DP2_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	EVERGREEN_DP3_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	EVERGREEN_DP4_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	EVERGREEN_DP5_REGISTER_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) static const unsigned evergreen_disp_int_status[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	DISP_INTERRUPT_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	DISP_INTERRUPT_STATUS_CONTINUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	DISP_INTERRUPT_STATUS_CONTINUE2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	DISP_INTERRUPT_STATUS_CONTINUE3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	DISP_INTERRUPT_STATUS_CONTINUE4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	DISP_INTERRUPT_STATUS_CONTINUE5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)  * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)  * We go from crtc to connector and it is not relible  since it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)  * should be an opposite direction .If crtc is enable then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)  * find the dig_fe which selects this crtc and insure that it enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)  * if such dig_fe is found then find dig_be which selects found dig_be and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)  * insure that it enable and in DP_SST mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)  * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)  * from dp symbols clocks .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 					       unsigned crtc_id, unsigned *ret_dig_fe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	unsigned dig_fe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	unsigned dig_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	unsigned dig_en_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	unsigned uniphy_pll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	unsigned digs_fe_selected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	unsigned dig_be_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	unsigned dig_fe_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	bool is_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	bool found_crtc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	/* loop through all running dig_fe to find selected crtc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 			/* found running pipe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 			found_crtc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 			dig_fe_mask = 1 << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 			dig_fe = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	if (found_crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		/* loop through all running dig_be to find selected dig_fe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 			/* if dig_fe_selected by dig_be? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			if (dig_fe_mask &  digs_fe_selected &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			    /* if dig_be in sst mode? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 			    dig_be_mode == NI_DIG_BE_DPSST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 						   ni_dig_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 						    ni_tx_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 				/* dig_be enable and tx is running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 					is_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 					*ret_dig_fe = dig_fe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	return is_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)  * Blank dig when in dp sst mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)  * Dig ignores crtc timing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static void evergreen_blank_dp_output(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 				      unsigned dig_fe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	unsigned stream_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	unsigned fifo_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	unsigned counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 			     evergreen_dp_offsets[dig_fe]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		DRM_ERROR("dig %d , should be enable\n", dig_fe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	       evergreen_dp_offsets[dig_fe], stream_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 			     evergreen_dp_offsets[dig_fe]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 				     evergreen_dp_offsets[dig_fe]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	if (counter >= 32 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		DRM_ERROR("counter exceeds %d\n", counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	u32 crtc_enabled, tmp, frame_count, blackout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	unsigned dig_fe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	if (!ASIC_IS_NODCE(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		/* disable VGA render */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		WREG32(VGA_RENDER_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	/* blank the display controllers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		if (crtc_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 			save->crtc_enabled[i] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			if (ASIC_IS_DCE6(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 					radeon_wait_for_vblank(rdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 					radeon_wait_for_vblank(rdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 			/* wait for the next frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			frame_count = radeon_get_vblank_counter(rdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			for (j = 0; j < rdev->usec_timeout; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 				if (radeon_get_vblank_counter(rdev, i) != frame_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 				udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 			/*we should disable dig if it drives dp sst*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 			/*but we are in radeon_device_init and the topology is unknown*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 			/*and it is available after radeon_modeset_init*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 			/*the following method radeon_atom_encoder_dpms_dig*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 			/*does the job if we initialize it properly*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 			/*for now we do it this manually*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 			/**/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 			if (ASIC_IS_DCE5(rdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 			    evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 				evergreen_blank_dp_output(rdev, dig_fe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			/*we could remove 6 lines below*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			save->crtc_enabled[i] = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			/* ***** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 			save->crtc_enabled[i] = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	radeon_mc_wait_for_idle(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		/* Block CPU access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		WREG32(BIF_FB_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 		/* blackout the MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		blackout &= ~BLACKOUT_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	/* wait for the MC to settle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	/* lock double buffered regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		if (save->crtc_enabled[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 			if (!(tmp & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 				tmp |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	u32 tmp, frame_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	/* update crtc base addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		       upper_32_bits(rdev->mc.vram_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		       upper_32_bits(rdev->mc.vram_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		       (u32)rdev->mc.vram_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		       (u32)rdev->mc.vram_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	if (!ASIC_IS_NODCE(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	/* unlock regs and wait for update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		if (save->crtc_enabled[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 			if ((tmp & 0x7) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 				tmp &= ~0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 			if (tmp & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 				tmp &= ~1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 			for (j = 0; j < rdev->usec_timeout; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 				udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	/* unblackout the MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	tmp &= ~BLACKOUT_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	/* allow CPU access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		if (save->crtc_enabled[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 			if (ASIC_IS_DCE6(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 				tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 			/* wait for the next frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 			frame_count = radeon_get_vblank_counter(rdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 			for (j = 0; j < rdev->usec_timeout; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 				if (radeon_get_vblank_counter(rdev, i) != frame_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 				udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	if (!ASIC_IS_NODCE(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		/* Unlock vga access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) void evergreen_mc_program(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	/* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	/* Lockout access through VGA aperture*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	/* Update configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 			/* VRAM before AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 				rdev->mc.vram_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 				rdev->mc.gtt_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 			/* VRAM after AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 				rdev->mc.gtt_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 				rdev->mc.vram_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 			rdev->mc.vram_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 			rdev->mc.vram_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	/* llano/ontario only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	if ((rdev->family == CHIP_PALM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	    (rdev->family == CHIP_SUMO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	    (rdev->family == CHIP_SUMO2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	WREG32(MC_VM_FB_LOCATION, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		WREG32(MC_VM_AGP_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	evergreen_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	/* we need to own VRAM, so turn off the VGA renderer here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	 * to stop it overwriting our objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	rv515_vga_render_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)  * CP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	struct radeon_ring *ring = &rdev->ring[ib->ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	u32 next_rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	/* set to DX10/11 mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	radeon_ring_write(ring, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	if (ring->rptr_save_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		next_rptr = ring->wptr + 3 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 		radeon_ring_write(ring, ((ring->rptr_save_reg - 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 					  PACKET3_SET_CONFIG_REG_START) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	} else if (rdev->wb.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 		next_rptr = ring->wptr + 5 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 		radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	radeon_ring_write(ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 			  (2 << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 			  (ib->gpu_addr & 0xFFFFFFFC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	radeon_ring_write(ring, ib->length_dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) static int evergreen_cp_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	if (!rdev->me_fw || !rdev->pfp_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	r700_cp_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	WREG32(CP_RB_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	       BUF_SWAP_32BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	fw_data = (const __be32 *)rdev->pfp_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	fw_data = (const __be32 *)rdev->me_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	WREG32(CP_ME_RAM_RADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) static int evergreen_cp_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	int r, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	uint32_t cp_me;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	r = radeon_ring_lock(rdev, ring, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	radeon_ring_write(ring, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	radeon_ring_write(ring, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	cp_me = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	WREG32(CP_ME_CNTL, cp_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	/* setup clear context state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	for (i = 0; i < evergreen_default_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		radeon_ring_write(ring, evergreen_default_state[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	/* set clear context state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	/* SQ_VTX_BASE_VTX_LOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	radeon_ring_write(ring, 0xc0026f00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	radeon_ring_write(ring, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	radeon_ring_write(ring, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	radeon_ring_write(ring, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	/* Clear consts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	radeon_ring_write(ring, 0xc0036f00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	radeon_ring_write(ring, 0x00000bc4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	radeon_ring_write(ring, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	radeon_ring_write(ring, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	radeon_ring_write(ring, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	radeon_ring_write(ring, 0xc0026900);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	radeon_ring_write(ring, 0x00000316);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	radeon_ring_write(ring, 0x00000010); /*  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) static int evergreen_cp_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	u32 rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 				 SOFT_RESET_PA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 				 SOFT_RESET_SH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 				 SOFT_RESET_VGT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 				 SOFT_RESET_SPI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 				 SOFT_RESET_SX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	WREG32(GRBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	/* Set ring buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	rb_bufsz = order_base_2(ring->ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	tmp |= BUF_SWAP_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	WREG32(CP_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	/* Set the write pointer delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	WREG32(CP_RB_WPTR_DELAY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	/* Initialize the ring buffer's read and write pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	WREG32(CP_RB_RPTR_WR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	ring->wptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	WREG32(CP_RB_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	/* set the wb address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	WREG32(CP_RB_RPTR_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 		WREG32(SCRATCH_UMSK, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 		tmp |= RB_NO_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 		WREG32(SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	WREG32(CP_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	evergreen_cp_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	ring->ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		ring->ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)  * Core functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) static void evergreen_gpu_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	u32 gb_addr_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	u32 mc_shared_chmap, mc_arb_ramcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	u32 sx_debug_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	u32 smx_dc_ctl0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	u32 sq_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	u32 sq_lds_resource_mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	u32 sq_gpr_resource_mgmt_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	u32 sq_gpr_resource_mgmt_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	u32 sq_gpr_resource_mgmt_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	u32 sq_thread_resource_mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	u32 sq_thread_resource_mgmt_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	u32 sq_stack_resource_mgmt_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	u32 sq_stack_resource_mgmt_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	u32 sq_stack_resource_mgmt_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	u32 vgt_cache_invalidation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	u32 hdp_host_path_cntl, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	u32 disabled_rb_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	int i, j, ps_thread_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	case CHIP_CYPRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	case CHIP_HEMLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		rdev->config.evergreen.num_ses = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		rdev->config.evergreen.max_tile_pipes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		rdev->config.evergreen.max_simds = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		rdev->config.evergreen.max_stack_entries = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 		rdev->config.evergreen.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	case CHIP_JUNIPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		rdev->config.evergreen.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		rdev->config.evergreen.max_simds = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		rdev->config.evergreen.max_stack_entries = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		rdev->config.evergreen.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	case CHIP_REDWOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 		rdev->config.evergreen.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		rdev->config.evergreen.max_simds = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 		rdev->config.evergreen.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 		rdev->config.evergreen.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		rdev->config.evergreen.max_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		rdev->config.evergreen.max_tile_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		rdev->config.evergreen.max_simds = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		rdev->config.evergreen.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		rdev->config.evergreen.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		rdev->config.evergreen.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		rdev->config.evergreen.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 		rdev->config.evergreen.sx_max_export_pos_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		rdev->config.evergreen.sx_max_export_smx_size = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 		rdev->config.evergreen.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		rdev->config.evergreen.sq_num_cf_insts = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 		rdev->config.evergreen.max_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		rdev->config.evergreen.max_tile_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		rdev->config.evergreen.max_simds = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 		rdev->config.evergreen.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 		rdev->config.evergreen.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		rdev->config.evergreen.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 		rdev->config.evergreen.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 		rdev->config.evergreen.sx_max_export_pos_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		rdev->config.evergreen.sx_max_export_smx_size = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		rdev->config.evergreen.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		rdev->config.evergreen.sq_num_cf_insts = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		rdev->config.evergreen.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 		if (rdev->pdev->device == 0x9648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			rdev->config.evergreen.max_simds = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 		else if ((rdev->pdev->device == 0x9647) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 			 (rdev->pdev->device == 0x964a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 			rdev->config.evergreen.max_simds = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 			rdev->config.evergreen.max_simds = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		rdev->config.evergreen.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 		rdev->config.evergreen.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 		rdev->config.evergreen.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 		rdev->config.evergreen.max_simds = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		rdev->config.evergreen.max_stack_entries = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		rdev->config.evergreen.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	case CHIP_BARTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		rdev->config.evergreen.num_ses = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 		rdev->config.evergreen.max_tile_pipes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 		rdev->config.evergreen.max_simds = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		rdev->config.evergreen.max_stack_entries = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 		rdev->config.evergreen.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	case CHIP_TURKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 		rdev->config.evergreen.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		rdev->config.evergreen.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 		rdev->config.evergreen.max_simds = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 		rdev->config.evergreen.max_threads = 248;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		rdev->config.evergreen.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		rdev->config.evergreen.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		rdev->config.evergreen.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 		rdev->config.evergreen.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 		rdev->config.evergreen.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 		rdev->config.evergreen.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		rdev->config.evergreen.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	case CHIP_CAICOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		rdev->config.evergreen.num_ses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		rdev->config.evergreen.max_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		rdev->config.evergreen.max_tile_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		rdev->config.evergreen.max_simds = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		rdev->config.evergreen.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		rdev->config.evergreen.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		rdev->config.evergreen.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		rdev->config.evergreen.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		rdev->config.evergreen.sx_num_of_sets = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		rdev->config.evergreen.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		rdev->config.evergreen.sx_max_export_pos_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		rdev->config.evergreen.sx_max_export_smx_size = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 		rdev->config.evergreen.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		rdev->config.evergreen.sq_num_cf_insts = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	/* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 		WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	WREG32(SRBM_INT_CNTL, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	WREG32(SRBM_INT_ACK, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	evergreen_fix_pci_max_read_req_size(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	if ((rdev->family == CHIP_PALM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	    (rdev->family == CHIP_SUMO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	    (rdev->family == CHIP_SUMO2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	 * not have bank info, so create a custom tiling dword.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	 * bits 3:0   num_pipes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	 * bits 7:4   num_banks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	 * bits 11:8  group_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	 * bits 15:12 row_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	rdev->config.evergreen.tile_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	switch (rdev->config.evergreen.max_tile_pipes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		rdev->config.evergreen.tile_config |= (0 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 		rdev->config.evergreen.tile_config |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 		rdev->config.evergreen.tile_config |= (2 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		rdev->config.evergreen.tile_config |= (3 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 		rdev->config.evergreen.tile_config |= 1 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		case 0: /* four banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 			rdev->config.evergreen.tile_config |= 0 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 		case 1: /* eight banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 			rdev->config.evergreen.tile_config |= 1 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		case 2: /* sixteen banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 			rdev->config.evergreen.tile_config |= 2 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	rdev->config.evergreen.tile_config |= 0 << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	rdev->config.evergreen.tile_config |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		((gb_addr_config & 0x30000000) >> 28) << 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 		u32 efuse_straps_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 		u32 efuse_straps_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 		efuse_straps_4 = RREG32_RCU(0x204);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 		efuse_straps_3 = RREG32_RCU(0x203);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 		tmp = (((efuse_straps_4 & 0xf) << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 		      ((efuse_straps_3 & 0xf0000000) >> 28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 			u32 rb_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 			tmp <<= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 			tmp |= rb_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	/* enabled rb are just the one not disabled :) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	disabled_rb_mask = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	for (i = 0; i < rdev->config.evergreen.max_backends; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		tmp |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	/* if all the backends are disabled, fix it up here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	if ((disabled_rb_mask & tmp) == tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		for (i = 0; i < rdev->config.evergreen.max_backends; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			disabled_rb_mask &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 		u32 simd_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		tmp <<= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 		tmp |= simd_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	rdev->config.evergreen.active_simds = hweight32(~tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	WREG32(DMA_TILING_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	if ((rdev->config.evergreen.max_backends == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	    (rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 		if ((disabled_rb_mask & 3) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 			/* RB0 disabled, RB1 enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 			tmp = 0x11111111;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 			/* RB1 disabled, RB0 enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 			tmp = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		tmp = gb_addr_config & NUM_PIPES_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	rdev->config.evergreen.backend_map = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	WREG32(GB_BACKEND_MAP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	WREG32(CGTS_TCC_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	WREG32(CGTS_USER_TCC_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	/* set HW defaults for 3D engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 				     ROQ_IB2_START(0x2b)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 			     SYNC_GRADIENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 			     SYNC_WALKER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 			     SYNC_ALIGNER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	sx_debug_1 = RREG32(SX_DEBUG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	WREG32(SX_DEBUG_1, sx_debug_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	if (rdev->family <= CHIP_SUMO2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		WREG32(SMX_SAR_CTL0, 0x00010000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	WREG32(VGT_NUM_INSTANCES, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	WREG32(SPI_CONFIG_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	WREG32(CP_PERFMON_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 				  FETCH_FIFO_HIWATER(0x4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 				  DONE_FIFO_HIWATER(0xe0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 				  ALU_UPDATE_FIFO_HIWATER(0x8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	sq_config = RREG32(SQ_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	sq_config &= ~(PS_PRIO(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		       VS_PRIO(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		       GS_PRIO(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		       ES_PRIO(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	sq_config |= (VC_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		      EXPORT_SRC_C |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		      PS_PRIO(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		      VS_PRIO(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		      GS_PRIO(2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 		      ES_PRIO(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	case CHIP_CAICOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		/* no vertex cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		sq_config &= ~VC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 	case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		ps_thread_count = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		ps_thread_count = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	WREG32(SQ_CONFIG, sq_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 					  FORCE_EOV_MAX_REZ_CNT(255)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	case CHIP_CAICOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	WREG32(VGT_GS_VERTEX_REUSE, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	WREG32(CB_PERF_CTR0_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	WREG32(CB_PERF_CTR0_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	WREG32(CB_PERF_CTR1_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	WREG32(CB_PERF_CTR1_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	WREG32(CB_PERF_CTR2_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	WREG32(CB_PERF_CTR2_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	WREG32(CB_PERF_CTR3_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	WREG32(CB_PERF_CTR3_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	/* clear render buffer base addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	WREG32(CB_COLOR0_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	WREG32(CB_COLOR1_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	WREG32(CB_COLOR2_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 	WREG32(CB_COLOR3_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	WREG32(CB_COLOR4_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	WREG32(CB_COLOR5_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	WREG32(CB_COLOR6_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	WREG32(CB_COLOR7_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	WREG32(CB_COLOR8_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	WREG32(CB_COLOR9_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	WREG32(CB_COLOR10_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	WREG32(CB_COLOR11_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	/* set the shader const cache sizes to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 		WREG32(i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		WREG32(i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	tmp = RREG32(HDP_MISC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	WREG32(HDP_MISC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 	udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) int evergreen_mc_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	int chansize, numchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	/* Get VRAM informations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	rdev->mc.vram_is_ddr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	if ((rdev->family == CHIP_PALM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	    (rdev->family == CHIP_SUMO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	    (rdev->family == CHIP_SUMO2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		tmp = RREG32(FUS_MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		tmp = RREG32(MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	if (tmp & CHANSIZE_OVERRIDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		chansize = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	} else if (tmp & CHANSIZE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 		chansize = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 		chansize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	tmp = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 		numchan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 		numchan = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 		numchan = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 		numchan = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 	rdev->mc.vram_width = numchan * chansize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	/* Could aper size report 0 ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	/* Setup GPU memory space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	if ((rdev->family == CHIP_PALM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	    (rdev->family == CHIP_SUMO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	    (rdev->family == CHIP_SUMO2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 		/* size in bytes on fusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 		/* size in MB on evergreen/cayman/tn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	r700_vram_gtt_location(rdev, &rdev->mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	radeon_update_bandwidth_info(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		RREG32(GRBM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		RREG32(GRBM_STATUS_SE0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		RREG32(GRBM_STATUS_SE1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		RREG32(SRBM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 		RREG32(SRBM_STATUS2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		RREG32(CP_STALLED_STAT1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		RREG32(CP_STALLED_STAT2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		RREG32(CP_BUSY_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 		RREG32(CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		RREG32(DMA_STATUS_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 	if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 			 RREG32(DMA_STATUS_REG + 0x800));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) bool evergreen_is_display_hung(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	u32 crtc_hung = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	u32 crtc_status[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 	u32 i, j, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 			crtc_hung |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	for (j = 0; j < 10; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 		for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 			if (crtc_hung & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 				if (tmp != crtc_status[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 					crtc_hung &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		if (crtc_hung == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	u32 reset_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	/* GRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 	tmp = RREG32(GRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 	if (tmp & (PA_BUSY | SC_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 		   SH_BUSY | SX_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 		   TA_BUSY | VGT_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 		   DB_BUSY | CB_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 		   SPI_BUSY | VGT_BUSY_NO_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 		reset_mask |= RADEON_RESET_GFX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 		   CP_BUSY | CP_COHERENCY_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		reset_mask |= RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	if (tmp & GRBM_EE_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 	/* DMA_STATUS_REG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	tmp = RREG32(DMA_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	if (!(tmp & DMA_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 		reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	/* SRBM_STATUS2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 	tmp = RREG32(SRBM_STATUS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	if (tmp & DMA_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 		reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 	/* SRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 	tmp = RREG32(SRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 		reset_mask |= RADEON_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	if (tmp & IH_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		reset_mask |= RADEON_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	if (tmp & SEM_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 		reset_mask |= RADEON_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	if (tmp & GRBM_RQ_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		reset_mask |= RADEON_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	if (tmp & VMC_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 		   MCC_BUSY | MCD_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 		reset_mask |= RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	if (evergreen_is_display_hung(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 		reset_mask |= RADEON_RESET_DISPLAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	/* VM_L2_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 	tmp = RREG32(VM_L2_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	if (tmp & L2_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 		reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 	/* Skip MC reset as it's mostly likely not hung, just busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 	if (reset_mask & RADEON_RESET_MC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 		reset_mask &= ~RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 	return reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 	if (reset_mask == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	evergreen_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	/* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	if (reset_mask & RADEON_RESET_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 		/* Disable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 		tmp = RREG32(DMA_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 		tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 		WREG32(DMA_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 	if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 		grbm_soft_reset |= SOFT_RESET_DB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 			SOFT_RESET_CB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 			SOFT_RESET_PA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 			SOFT_RESET_SC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 			SOFT_RESET_SPI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 			SOFT_RESET_SX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 			SOFT_RESET_SH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 			SOFT_RESET_TC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 			SOFT_RESET_TA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 			SOFT_RESET_VC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 			SOFT_RESET_VGT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	if (reset_mask & RADEON_RESET_CP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 		grbm_soft_reset |= SOFT_RESET_CP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 			SOFT_RESET_VGT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 		srbm_soft_reset |= SOFT_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	if (reset_mask & RADEON_RESET_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		srbm_soft_reset |= SOFT_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	if (reset_mask & RADEON_RESET_DISPLAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 		srbm_soft_reset |= SOFT_RESET_DC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	if (reset_mask & RADEON_RESET_RLC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 		srbm_soft_reset |= SOFT_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	if (reset_mask & RADEON_RESET_SEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 		srbm_soft_reset |= SOFT_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	if (reset_mask & RADEON_RESET_IH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 		srbm_soft_reset |= SOFT_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	if (reset_mask & RADEON_RESET_GRBM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		srbm_soft_reset |= SOFT_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 	if (reset_mask & RADEON_RESET_VMC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 		srbm_soft_reset |= SOFT_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	if (!(rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 		if (reset_mask & RADEON_RESET_MC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 			srbm_soft_reset |= SOFT_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	if (grbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 		tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 		tmp |= grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 		WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 		tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 		udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 		tmp &= ~grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 		WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 		tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	if (srbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 		tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 		tmp |= srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 		WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 		tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 		udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		tmp &= ~srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 	/* Wait a little for things to settle down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	evergreen_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	evergreen_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 	u32 tmp, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	dev_info(rdev->dev, "GPU pci config reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	/* disable dpm? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 	/* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 	udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	/* Disable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 	tmp = RREG32(DMA_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 	tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 	WREG32(DMA_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	/* XXX other engines? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	/* halt the rlc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	r600_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 	udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	/* set mclk/sclk to bypass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 	rv770_set_clk_bypass_mode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 	/* disable BM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	pci_clear_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 	/* disable mem access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 	if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	/* reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 	radeon_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	/* wait for asic to come out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 	for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) int evergreen_asic_reset(struct radeon_device *rdev, bool hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	u32 reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	if (hard) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 		evergreen_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	if (reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 		r600_set_bios_scratch_engine_hung(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 	/* try soft reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	evergreen_gpu_soft_reset(rdev, reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 	/* try pci config reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	if (reset_mask && radeon_hard_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 		evergreen_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	if (!reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 		r600_set_bios_scratch_engine_hung(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083)  * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)  * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)  * @ring: radeon_ring structure holding ring information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)  * Check if the GFX engine is locked up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)  * Returns true if the engine appears to be locked up, false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 	if (!(reset_mask & (RADEON_RESET_GFX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 			    RADEON_RESET_COMPUTE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 			    RADEON_RESET_CP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 		radeon_ring_lockup_update(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 	return radeon_ring_test_lockup(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)  * RLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) #define RLC_SAVE_RESTORE_LIST_END_MARKER    0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) #define RLC_CLEAR_STATE_END_MARKER          0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) void sumo_rlc_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	/* save restore block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	if (rdev->rlc.save_restore_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 		if (unlikely(r != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 		radeon_bo_unpin(rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 		radeon_bo_unref(&rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 		rdev->rlc.save_restore_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	/* clear state block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 	if (rdev->rlc.clear_state_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 		if (unlikely(r != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 		radeon_bo_unpin(rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 		radeon_bo_unref(&rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 		rdev->rlc.clear_state_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 	/* clear state block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	if (rdev->rlc.cp_table_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 		if (unlikely(r != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 		radeon_bo_unpin(rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 		radeon_bo_unref(&rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 		rdev->rlc.cp_table_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) #define CP_ME_TABLE_SIZE    96
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) int sumo_rlc_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 	const u32 *src_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	volatile u32 *dst_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	u32 dws, data, i, j, k, reg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	u64 reg_list_mc_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	const struct cs_section_def *cs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	src_ptr = rdev->rlc.reg_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	dws = rdev->rlc.reg_list_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 	if (rdev->family >= CHIP_BONAIRE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 		dws += (5 * 16) + 48 + 48 + 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	cs_data = rdev->rlc.cs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	if (src_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 		/* save restore block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 		if (rdev->rlc.save_restore_obj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 					     NULL, &rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 			if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 				dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 				return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 		if (unlikely(r != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 		r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 				  &rdev->rlc.save_restore_gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 			radeon_bo_unreserve(rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 			dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 		r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 			dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 		/* write the sr buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 		dst_ptr = rdev->rlc.sr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		if (rdev->family >= CHIP_TAHITI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 			/* SI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 			for (i = 0; i < rdev->rlc.reg_list_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 			/* ON/LN/TN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 			/* format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 			 * dw0: (reg2 << 16) | reg1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 			 * dw1: reg1 save space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 			 * dw2: reg2 save space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 			for (i = 0; i < dws; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 				data = src_ptr[i] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 				i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 				if (i < dws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 					data |= (src_ptr[i] >> 2) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 				j = (((i - 1) * 3) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 				dst_ptr[j] = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 			j = ((i * 3) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	if (cs_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 		/* clear state block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 		if (rdev->family >= CHIP_BONAIRE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 			rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 		} else if (rdev->family >= CHIP_TAHITI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 			rdev->rlc.clear_state_size = si_get_csb_size(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 			dws = rdev->rlc.clear_state_size + (256 / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 			reg_list_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 			dws = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 			for (i = 0; cs_data[i].section != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 					reg_list_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 					dws += cs_data[i].section[j].reg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 			reg_list_blk_index = (3 * reg_list_num + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 			dws += reg_list_blk_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 			rdev->rlc.clear_state_size = dws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 		if (rdev->rlc.clear_state_obj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 					     NULL, &rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 			if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 				dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 				sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 				return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		if (unlikely(r != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 		r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 				  &rdev->rlc.clear_state_gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 			radeon_bo_unreserve(rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 			dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 		r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 			dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 		/* set up the cs buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 		dst_ptr = rdev->rlc.cs_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 		if (rdev->family >= CHIP_BONAIRE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 			cik_get_csb_buffer(rdev, dst_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 		} else if (rdev->family >= CHIP_TAHITI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 			reg_list_hdr_blk_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 			data = upper_32_bits(reg_list_mc_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 			reg_list_hdr_blk_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 			for (i = 0; cs_data[i].section != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 					reg_num = cs_data[i].section[j].reg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 					data = reg_list_mc_addr & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 					reg_list_hdr_blk_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 					reg_list_hdr_blk_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 					data = 0x08000000 | (reg_num * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 					reg_list_hdr_blk_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 					for (k = 0; k < reg_num; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 						data = cs_data[i].section[j].extent[k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 					reg_list_mc_addr += reg_num * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 					reg_list_blk_index += reg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	if (rdev->rlc.cp_table_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 		if (rdev->rlc.cp_table_obj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 					     PAGE_SIZE, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 					     NULL, &rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 			if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 				dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 				sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 				return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 		if (unlikely(r != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 		r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 				  &rdev->rlc.cp_table_gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 			radeon_bo_unreserve(rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 			dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 		r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 			dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 		cik_init_cp_pg_table(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 		radeon_bo_kunmap(rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) static void evergreen_rlc_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	u32 mask = RLC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 	if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 		mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	WREG32(RLC_CNTL, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) int evergreen_rlc_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	if (!rdev->rlc_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 	r600_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	WREG32(RLC_HB_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 	if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 		if (rdev->family == CHIP_ARUBA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 			u32 always_on_bitmap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 				3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 			/* find out the number of active simds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 			u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 			tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 			tmp = hweight32(~tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 			if (tmp == rdev->config.cayman.max_simds_per_se) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 				WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 				WREG32(TN_RLC_LB_PARAMS, 0x00601004);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 				WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 				WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 				WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 			WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 			WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 		WREG32(RLC_HB_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 		WREG32(RLC_HB_RPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 		WREG32(RLC_HB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 	WREG32(RLC_MC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	WREG32(RLC_UCODE_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 	fw_data = (const __be32 *)rdev->rlc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 	if (rdev->family >= CHIP_ARUBA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 			WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 	} else if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 			WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 			WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	WREG32(RLC_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 	evergreen_rlc_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) /* Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 	if (crtc >= rdev->num_crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) void evergreen_disable_interrupt_state(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 	if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 		cayman_cp_int_cntl_setup(rdev, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 		cayman_cp_int_cntl_setup(rdev, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 		cayman_cp_int_cntl_setup(rdev, 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 		WREG32(CAYMAN_DMA1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 	WREG32(DMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 	WREG32(GRBM_INT_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 	WREG32(SRBM_INT_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 	for (i = 0; i < rdev->num_crtc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 		WREG32(INT_MASK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 	for (i = 0; i < rdev->num_crtc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 	/* only one DAC on DCE5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 	if (!ASIC_IS_DCE5(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 	for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 		WREG32_AND(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_POLARITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) /* Note that the order we write back regs here is important */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) int evergreen_irq_set(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 	u32 grbm_int_cntl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 	u32 dma_cntl, dma_cntl1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	u32 thermal_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 	if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 	/* don't enable anything if the ih is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	if (!rdev->ih.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 		r600_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 		/* force the active interrupt state to all disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 		evergreen_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 	if (rdev->family == CHIP_ARUBA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 		thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 		thermal_int = RREG32(CG_THERMAL_INT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 	if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 		/* enable CP interrupts on all rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 			cp_int_cntl |= RB_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 		DRM_DEBUG("r600_irq_set: sw int dma\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 		dma_cntl |= TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 			DRM_DEBUG("r600_irq_set: sw int dma1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 			dma_cntl1 |= TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 	if (rdev->irq.dpm_thermal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 		DRM_DEBUG("dpm thermal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 	if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 		WREG32(CP_INT_CNTL, cp_int_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 	WREG32(DMA_CNTL, dma_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 	if (rdev->family >= CHIP_CAYMAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 		radeon_irq_kms_set_irq_n_enabled(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 		    rdev, INT_MASK + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 		    VBLANK_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 		    rdev->irq.crtc_vblank_int[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 		    atomic_read(&rdev->irq.pflip[i]), "vblank", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	for (i = 0; i < rdev->num_crtc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 		radeon_irq_kms_set_irq_n_enabled(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 		    rdev, DC_HPDx_INT_CONTROL(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 		    DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 		    rdev->irq.hpd[i], "HPD", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 	if (rdev->family == CHIP_ARUBA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 		WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 		WREG32(CG_THERMAL_INT, thermal_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 		radeon_irq_kms_set_irq_n_enabled(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 		    rdev, AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 		    AFMT_AZ_FORMAT_WTRIG_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 		    rdev->irq.afmt[i], "HDMI", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 	/* posting read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 	RREG32(SRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) /* Note that the order we write back regs here is important */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) static void evergreen_irq_ack(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 	u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 		disp_int[i] = RREG32(evergreen_disp_int_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 		afmt_status[i] = RREG32(AFMT_STATUS + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 		if (i < rdev->num_crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 			grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	/* We write back each interrupt register in pairs of two */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 	for (i = 0; i < rdev->num_crtc; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 		for (j = i; j < (i + 2); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 			if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 				WREG32(GRPH_INT_STATUS + crtc_offsets[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 				       GRPH_PFLIP_INT_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 		for (j = i; j < (i + 2); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 			if (disp_int[j] & LB_D1_VBLANK_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 				WREG32(VBLANK_STATUS + crtc_offsets[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 				       VBLANK_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 			if (disp_int[j] & LB_D1_VLINE_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 				WREG32(VLINE_STATUS + crtc_offsets[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 				       VLINE_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 	for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 		if (disp_int[i] & DC_HPD1_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 	for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 		if (disp_int[i] & DC_HPD1_RX_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 	for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 		if (afmt_status[i] & AFMT_AZ_FORMAT_WTRIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 			WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 				  AFMT_AZ_FORMAT_WTRIG_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) static void evergreen_irq_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 	r600_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 	/* Wait and acknowledge irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 	mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 	evergreen_irq_ack(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 	evergreen_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) void evergreen_irq_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	evergreen_irq_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 	r600_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 	u32 wptr, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 	if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 		wptr = RREG32(IH_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 	if (wptr & RB_OVERFLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 		wptr &= ~RB_OVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 		/* When a ring buffer overflow happen start parsing interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 		 * from the last not overwritten vector (wptr + 16). Hopefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 		 * this should allow us to catchup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 		tmp = RREG32(IH_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 		WREG32(IH_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	return (wptr & rdev->ih.ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) int evergreen_irq_process(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 	u32 crtc_idx, hpd_idx, afmt_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 	u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	u32 wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	u32 rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 	u32 src_id, src_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	u32 ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 	bool queue_hotplug = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 	bool queue_hdmi = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 	bool queue_dp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 	bool queue_thermal = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 	u32 status, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 	const char *event_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 	if (!rdev->ih.enabled || rdev->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 	wptr = evergreen_get_ih_wptr(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) restart_ih:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	/* is somebody else already processing irqs? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 	if (atomic_xchg(&rdev->ih.lock, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 	rptr = rdev->ih.rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	/* Order reading of wptr vs. reading of IH ring data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 	rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 	/* display interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 	evergreen_irq_ack(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 	while (rptr != wptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 		/* wptr/rptr are in bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 		ring_index = rptr / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 		switch (src_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 		case 1: /* D1 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 		case 2: /* D2 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 		case 3: /* D3 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 		case 4: /* D4 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 		case 5: /* D5 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 		case 6: /* D6 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 			crtc_idx = src_id - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 			if (src_data == 0) { /* vblank */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 				mask = LB_D1_VBLANK_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 				event_name = "vblank";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 				if (rdev->irq.crtc_vblank_int[crtc_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 					drm_handle_vblank(rdev->ddev, crtc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 					rdev->pm.vblank_sync = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 					wake_up(&rdev->irq.vblank_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 				if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 					radeon_crtc_handle_vblank(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 								  crtc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 			} else if (src_data == 1) { /* vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 				mask = LB_D1_VLINE_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 				event_name = "vline";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 				DRM_DEBUG("Unhandled interrupt: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 					  src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 			if (!(disp_int[crtc_idx] & mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 				DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 					  crtc_idx + 1, event_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 			disp_int[crtc_idx] &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 			DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 		case 8: /* D1 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 		case 10: /* D2 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 		case 12: /* D3 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) 		case 14: /* D4 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 		case 16: /* D5 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 		case 18: /* D6 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 			if (radeon_use_pflipirq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 		case 42: /* HPD hotplug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 			if (src_data <= 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 				hpd_idx = src_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 				mask = DC_HPD1_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 				queue_hotplug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 				event_name = "HPD";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) 			} else if (src_data <= 11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 				hpd_idx = src_data - 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 				mask = DC_HPD1_RX_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) 				queue_dp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) 				event_name = "HPD_RX";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 				DRM_DEBUG("Unhandled interrupt: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 					  src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 			if (!(disp_int[hpd_idx] & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 			disp_int[hpd_idx] &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 			DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 		case 44: /* hdmi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 			afmt_idx = src_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 			if (afmt_idx > 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 				DRM_ERROR("Unhandled interrupt: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 					  src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 			queue_hdmi = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 		case 96:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 			WREG32(SRBM_INT_ACK, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 		case 124: /* UVD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 		case 146:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) 		case 147:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 			/* reset addr and status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 			if (addr == 0x0 && status == 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 				addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 			cayman_vm_decode_fault(rdev, status, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 		case 176: /* CP_INT in ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 		case 177: /* CP_INT in IB1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 		case 178: /* CP_INT in IB2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 		case 181: /* CP EOP event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 			DRM_DEBUG("IH: CP EOP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 			if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 				switch (src_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 				case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 				case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 				case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) 				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) 		case 224: /* DMA trap event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) 			DRM_DEBUG("IH: DMA trap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) 		case 230: /* thermal low to high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 			DRM_DEBUG("IH: thermal low to high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 			rdev->pm.dpm.thermal.high_to_low = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 			queue_thermal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 		case 231: /* thermal high to low */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 			DRM_DEBUG("IH: thermal high to low\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 			rdev->pm.dpm.thermal.high_to_low = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 			queue_thermal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 		case 233: /* GUI IDLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 			DRM_DEBUG("IH: GUI idle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 		case 244: /* DMA trap event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 			if (rdev->family >= CHIP_CAYMAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 				DRM_DEBUG("IH: DMA1 trap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 		/* wptr/rptr are in bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 		rptr += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 		rptr &= rdev->ih.ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 		WREG32(IH_RB_RPTR, rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 	if (queue_dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) 		schedule_work(&rdev->dp_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 	if (queue_hotplug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 		schedule_delayed_work(&rdev->hotplug_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) 	if (queue_hdmi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) 		schedule_work(&rdev->audio_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 	if (queue_thermal && rdev->pm.dpm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) 		schedule_work(&rdev->pm.dpm.thermal.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) 	rdev->ih.rptr = rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 	atomic_set(&rdev->ih.lock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) 	/* make sure wptr hasn't changed while processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 	wptr = evergreen_get_ih_wptr(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) 	if (wptr != rptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 		goto restart_ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) static void evergreen_uvd_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) 	if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) 	r = radeon_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) 		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 		 * At this point rdev->uvd.vcpu_bo is NULL which trickles down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) 		 * to early fails uvd_v2_2_resume() and thus nothing happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 		 * there. So it is pointless to try to go through that code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 		 * hence why we disable uvd here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 		rdev->has_uvd = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) static void evergreen_uvd_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) 	if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) 	r = uvd_v2_2_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) 		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) static void evergreen_uvd_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) 	struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) 	r = uvd_v1_0_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) 		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) static int evergreen_startup(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 	struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 	/* enable pcie gen2 link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) 	evergreen_pcie_gen2_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 	/* enable aspm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) 	evergreen_program_aspm(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 	/* scratch needs to be initialized before MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) 	r = r600_vram_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) 	evergreen_mc_program(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 		r = ni_mc_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 			DRM_ERROR("Failed to load MC firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) 	if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) 		evergreen_agp_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) 		r = evergreen_pcie_gart_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) 		if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) 	evergreen_gpu_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 	/* allocate rlc buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) 	if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) 		rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) 		rdev->rlc.reg_list_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) 			(u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) 		rdev->rlc.cs_data = evergreen_cs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 		r = sumo_rlc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 			DRM_ERROR("Failed to init rlc BOs!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 	/* allocate wb buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 	r = radeon_wb_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 	evergreen_uvd_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 	/* Enable IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) 	if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) 		r = radeon_irq_kms_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 		if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) 	r = r600_irq_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) 		radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) 	evergreen_irq_set(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) 			     RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 	r = evergreen_cp_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 	r = evergreen_cp_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 	r = r600_dma_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 	evergreen_uvd_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 	r = radeon_ib_pool_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 	r = radeon_audio_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 		DRM_ERROR("radeon: audio init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) int evergreen_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) 	/* reset the asic, the gfx blocks are often in a bad state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) 	 * after the driver is unloaded or after a resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 	if (radeon_asic_reset(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 		dev_warn(rdev->dev, "GPU reset failed !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 	 * posting will perform necessary task to bring back GPU into good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 	 * shape.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 	/* post card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 	atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 	/* init golden registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) 	evergreen_init_golden_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) 	if (rdev->pm.pm_method == PM_METHOD_DPM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 		radeon_pm_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 	rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) 	r = evergreen_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 		DRM_ERROR("evergreen startup failed on resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 		rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) int evergreen_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 	radeon_pm_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 	radeon_audio_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) 	if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 		uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 		radeon_uvd_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 	r700_cp_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) 	r600_dma_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) 	evergreen_irq_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) 	radeon_wb_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 	evergreen_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) /* Plan is to move initialization in that function and use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171)  * helper function so that radeon_device_init pretty much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172)  * do nothing more than calling asic specific function. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173)  * should also allow to remove a bunch of callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174)  * like vram_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) int evergreen_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) 	/* Read BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) 	if (!radeon_get_bios(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) 		if (ASIC_IS_AVIVO(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) 	/* Must be an ATOMBIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) 	if (!rdev->is_atom_bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) 		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) 	r = radeon_atombios_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) 	/* reset the asic, the gfx blocks are often in a bad state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 	 * after the driver is unloaded or after a resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) 	if (radeon_asic_reset(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) 		dev_warn(rdev->dev, "GPU reset failed !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 	/* Post card if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) 	if (!radeon_card_posted(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) 		if (!rdev->bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) 		DRM_INFO("GPU not posted. posting now...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) 		atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) 	/* init golden registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) 	evergreen_init_golden_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) 	/* Initialize scratch registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) 	r600_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) 	/* Initialize surface registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) 	radeon_surface_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) 	/* Initialize clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) 	radeon_get_clock_info(rdev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) 	/* Fence driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) 	r = radeon_fence_driver_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) 	/* initialize AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 	if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 		r = radeon_agp_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 		if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 			radeon_agp_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 	/* initialize memory controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 	r = evergreen_mc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 	/* Memory manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 	r = radeon_bo_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 	if (ASIC_IS_DCE5(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) 			r = ni_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 			if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 				DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 				return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 			r = r600_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) 			if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) 				DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 				return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) 	/* Initialize power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) 	radeon_pm_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) 	evergreen_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) 	rdev->ih.ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) 	r600_ih_ring_init(rdev, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 	r = r600_pcie_gart_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 	rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 	r = evergreen_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) 		dev_err(rdev->dev, "disabling GPU acceleration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) 		r700_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) 		r600_dma_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) 		r600_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) 		if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) 			sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) 		radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) 		radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) 		radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) 		evergreen_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) 		rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 	/* Don't start up if the MC ucode is missing on BTC parts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 	 * The default clocks and voltages before the MC ucode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 	 * is loaded are not suffient for advanced operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 	if (ASIC_IS_DCE5(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 			DRM_ERROR("radeon: MC ucode required for NI+.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) void evergreen_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 	radeon_pm_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) 	radeon_audio_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 	r700_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) 	r600_dma_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) 	r600_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 	if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 		sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 	radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 	radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 	radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 	uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) 	radeon_uvd_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 	evergreen_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 	r600_vram_scratch_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) 	radeon_gem_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 	radeon_fence_driver_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 	radeon_agp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 	radeon_bo_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 	radeon_atombios_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 	kfree(rdev->bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 	rdev->bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 	u32 link_width_cntl, speed_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 	if (radeon_pcie_gen2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 	if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 	if (!(rdev->flags & RADEON_IS_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 	/* x2 cards have a special sequence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 	if (ASIC_IS_X2(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 	if (speed_cntl & LC_CURRENT_DATA_RATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) 		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) 		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 		speed_cntl |= LC_GEN2_EN_STRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 		if (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 			link_width_cntl |= LC_UPCONFIGURE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) void evergreen_program_aspm(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 	u32 data, orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 	u32 pcie_lc_cntl, pcie_lc_cntl_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 	bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 	/* fusion_platform = true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 	 * if the system is a fusion system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) 	 * (APU or DGPU in a fusion system).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 	 * todo: check if the system is a fusion platform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 	bool fusion_platform = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) 	if (radeon_aspm == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) 	if (!(rdev->flags & RADEON_IS_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) 	switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) 	case CHIP_CYPRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) 	case CHIP_HEMLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) 	case CHIP_JUNIPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) 	case CHIP_REDWOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) 	case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) 	case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) 	case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) 	case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) 	case CHIP_ARUBA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) 		disable_l0s = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 		disable_l0s = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) 	if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) 		fusion_platform = true; /* XXX also dGPUs in a fusion system */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) 	data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) 	if (fusion_platform)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) 		data &= ~MULTI_PIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) 		data |= MULTI_PIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) 	if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) 		WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) 	data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) 	if (fusion_platform)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) 		data &= ~MULTI_PIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 		data |= MULTI_PIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) 	if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) 		WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) 	pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) 	pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) 	if (!disable_l0s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) 		if (rdev->family >= CHIP_BARTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) 			pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) 			pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) 	if (!disable_l1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) 		if (rdev->family >= CHIP_BARTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) 			pcie_lc_cntl |= LC_L1_INACTIVITY(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) 			pcie_lc_cntl |= LC_L1_INACTIVITY(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) 		if (!disable_plloff_in_l1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) 			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 			if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) 				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) 			if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) 				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) 			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) 			if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) 				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) 			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) 			if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) 				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) 			if (rdev->family >= CHIP_BARTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) 				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) 				data |= PLL_RAMP_UP_TIME_0(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) 				if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) 				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) 				data |= PLL_RAMP_UP_TIME_1(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) 				if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) 				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 				data |= PLL_RAMP_UP_TIME_0(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) 				if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) 				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) 				data |= PLL_RAMP_UP_TIME_1(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) 				if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) 			data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) 			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) 			data |= LC_DYN_LANES_PWR_STATE(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) 			if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) 				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) 			if (rdev->family >= CHIP_BARTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) 				data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 				data &= ~LS2_EXIT_TIME_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) 				data |= LS2_EXIT_TIME(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) 				if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) 					WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) 				data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) 				data &= ~LS2_EXIT_TIME_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 				data |= LS2_EXIT_TIME(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) 				if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) 					WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) 	/* evergreen parts only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 	if (rdev->family < CHIP_BARTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) 		pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 	if (pcie_lc_cntl != pcie_lc_cntl_old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) 		WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) }