^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright 2010 Advanced Micro Devices, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Permission is hereby granted, free of charge, to any person obtaining a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * copy of this software and associated documentation files (the "Software"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * to deal in the Software without restriction, including without limitation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * the rights to use, copy, modify, merge, publish, distribute, sublicense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * and/or sell copies of the Software, and to permit persons to whom the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Software is furnished to do so, subject to the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * The above copyright notice and this permission notice shall be included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * all copies or substantial portions of the Software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * OTHER DEALINGS IN THE SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Authors: Alex Deucher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <drm/radeon_drm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include "atom.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "cayman_blit_shaders.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include "clearstate_cayman.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "ni_reg.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "nid.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "radeon.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "radeon_asic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "radeon_audio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "radeon_ucode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * Indirect registers accessor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) u32 r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) spin_lock_irqsave(&rdev->smc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) WREG32(TN_SMC_IND_INDEX_0, (reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) r = RREG32(TN_SMC_IND_DATA_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) spin_lock_irqsave(&rdev->smc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) WREG32(TN_SMC_IND_INDEX_0, (reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) WREG32(TN_SMC_IND_DATA_0, (v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
/*
 * Register offsets the RLC saves/restores for Trinity (TN) parts.
 * NOTE(review): presumably consumed by sumo_rlc_init() (declared below) to
 * build the RLC save/restore buffer — confirm against the RLC setup code.
 * Do not reorder or edit entries without checking the hardware docs.
 */
static const u32 tn_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
/*
 * Helpers shared with the evergreen/sumo code paths.
 * NOTE(review): presumably defined in evergreen.c / the sumo RLC code —
 * these belong in a shared header rather than per-file externs; confirm.
 */
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
/* Firmware Names */
/* BTC family: Barts / Turks / Caicos share one RLC image (BTC_rlc.bin). */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
/* Cayman has its own RLC and SMC images. */
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
/* Aruba (Trinity/Richland APU): no discrete MC/SMC firmware listed here. */
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
/*
 * Additional golden register settings for Cayman, applied via
 * radeon_program_register_sequence() (see ni_init_golden_registers()).
 * NOTE(review): entries appear to be {offset, and_mask, or_value} triples —
 * confirm against radeon_program_register_sequence().
 */
static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
/*
 * Golden register settings for Cayman, applied via
 * radeon_program_register_sequence() (see ni_init_golden_registers()).
 * NOTE(review): entries appear to be {offset, and_mask, or_value} triples —
 * confirm against radeon_program_register_sequence().
 */
static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
/*
 * Additional golden register settings for DVST (Trinity/ARUBA variant).
 * NOTE(review): entries appear to be {offset, and_mask, or_value} triples —
 * confirm against radeon_program_register_sequence().
 */
static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
/*
 * Golden register settings for DVST (Trinity/ARUBA variant).
 * NOTE(review): entries appear to be {offset, and_mask, or_value} triples —
 * confirm against radeon_program_register_sequence().
 */
static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
/*
 * Golden register settings for Scrapper (Trinity/ARUBA variant).
 * NOTE(review): entries appear to be {offset, and_mask, or_value} triples —
 * confirm against radeon_program_register_sequence().  Most entries are
 * deliberately listed twice (programmed twice in sequence); do not
 * "deduplicate" without checking the hardware programming guide.
 */
static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) static void ni_init_golden_registers(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) case CHIP_CAYMAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) cayman_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) (const u32)ARRAY_SIZE(cayman_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) cayman_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) (const u32)ARRAY_SIZE(cayman_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) case CHIP_ARUBA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if ((rdev->pdev->device == 0x9900) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) (rdev->pdev->device == 0x9901) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) (rdev->pdev->device == 0x9903) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) (rdev->pdev->device == 0x9904) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) (rdev->pdev->device == 0x9905) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) (rdev->pdev->device == 0x9906) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) (rdev->pdev->device == 0x9907) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) (rdev->pdev->device == 0x9908) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) (rdev->pdev->device == 0x9909) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) (rdev->pdev->device == 0x990A) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) (rdev->pdev->device == 0x990B) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) (rdev->pdev->device == 0x990C) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) (rdev->pdev->device == 0x990D) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) (rdev->pdev->device == 0x990E) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) (rdev->pdev->device == 0x990F) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) (rdev->pdev->device == 0x9910) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) (rdev->pdev->device == 0x9913) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) (rdev->pdev->device == 0x9917) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) (rdev->pdev->device == 0x9918)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) dvst_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) (const u32)ARRAY_SIZE(dvst_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) dvst_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) (const u32)ARRAY_SIZE(dvst_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) scrapper_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) (const u32)ARRAY_SIZE(scrapper_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) dvst_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) (const u32)ARRAY_SIZE(dvst_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) #define BTC_IO_MC_REGS_SIZE 29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {0x00000077, 0xff010100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {0x00000078, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) {0x00000079, 0x00001434},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {0x0000007a, 0xcc08ec08},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {0x0000007b, 0x00040000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) {0x0000007c, 0x000080c0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {0x0000007d, 0x09000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) {0x0000007e, 0x00210404},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {0x00000081, 0x08a8e800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {0x00000082, 0x00030444},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) {0x00000085, 0x00000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {0x00000086, 0x00000002},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) {0x00000087, 0x48490000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {0x00000088, 0x20244647},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {0x00000089, 0x00000005},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) {0x0000008b, 0x66030000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {0x0000008c, 0x00006603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) {0x0000008d, 0x00000100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) {0x0000008f, 0x00001c0a},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) {0x00000090, 0xff000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) {0x0000009f, 0x00946a00}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) {0x00000077, 0xff010100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) {0x00000078, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {0x00000079, 0x00001434},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {0x0000007a, 0xcc08ec08},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) {0x0000007b, 0x00040000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) {0x0000007c, 0x000080c0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) {0x0000007d, 0x09000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {0x0000007e, 0x00210404},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {0x00000081, 0x08a8e800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {0x00000082, 0x00030444},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {0x00000085, 0x00000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) {0x00000086, 0x00000002},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {0x00000087, 0x48490000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {0x00000088, 0x20244647},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) {0x00000089, 0x00000005},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {0x0000008b, 0x66030000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {0x0000008c, 0x00006603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {0x0000008d, 0x00000100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) {0x0000008f, 0x00001c0a},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {0x00000090, 0xff000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {0x0000009f, 0x00936a00}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) {0x00000077, 0xff010100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) {0x00000078, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {0x00000079, 0x00001434},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {0x0000007a, 0xcc08ec08},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {0x0000007b, 0x00040000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {0x0000007c, 0x000080c0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {0x0000007d, 0x09000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {0x0000007e, 0x00210404},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {0x00000081, 0x08a8e800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {0x00000082, 0x00030444},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) {0x00000085, 0x00000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {0x00000086, 0x00000002},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {0x00000087, 0x48490000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) {0x00000088, 0x20244647},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {0x00000089, 0x00000005},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) {0x0000008b, 0x66030000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {0x0000008c, 0x00006603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) {0x0000008d, 0x00000100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) {0x0000008f, 0x00001c0a},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) {0x00000090, 0xff000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) {0x0000009f, 0x00916a00}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) {0x00000077, 0xff010100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) {0x00000078, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {0x00000079, 0x00001434},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) {0x0000007a, 0xcc08ec08},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {0x0000007b, 0x00040000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) {0x0000007c, 0x000080c0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) {0x0000007d, 0x09000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {0x0000007e, 0x00210404},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) {0x00000081, 0x08a8e800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {0x00000082, 0x00030444},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {0x00000085, 0x00000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) {0x00000086, 0x00000002},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {0x00000087, 0x48490000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) {0x00000088, 0x20244647},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) {0x00000089, 0x00000005},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {0x0000008b, 0x66030000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {0x0000008c, 0x00006603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {0x0000008d, 0x00000100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) {0x0000008f, 0x00001c0a},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {0x00000090, 0xff000001},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {0x0000009f, 0x00976b00}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) int ni_mc_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) u32 mem_type, running, blackout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) u32 *io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) int i, ucode_size, regs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) if (!rdev->mc_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) case CHIP_BARTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) io_mc_regs = (u32 *)&barts_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) ucode_size = BTC_MC_UCODE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) regs_size = BTC_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) case CHIP_TURKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) io_mc_regs = (u32 *)&turks_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) ucode_size = BTC_MC_UCODE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) regs_size = BTC_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) case CHIP_CAICOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) io_mc_regs = (u32 *)&caicos_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ucode_size = BTC_MC_UCODE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) regs_size = BTC_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) case CHIP_CAYMAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) io_mc_regs = (u32 *)&cayman_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) ucode_size = CAYMAN_MC_UCODE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) regs_size = BTC_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /* reset the engine and set to writable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* load mc io regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) for (i = 0; i < regs_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /* load the MC ucode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) fw_data = (const __be32 *)rdev->mc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) for (i = 0; i < ucode_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /* put the engine back into the active state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) /* wait for training to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) int ni_init_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) const char *chip_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) const char *rlc_chip_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) size_t smc_req_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) char fw_name[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) DRM_DEBUG("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) case CHIP_BARTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) chip_name = "BARTS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) rlc_chip_name = "BTC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mc_req_size = BTC_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) case CHIP_TURKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) chip_name = "TURKS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) rlc_chip_name = "BTC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) mc_req_size = BTC_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) case CHIP_CAICOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) chip_name = "CAICOS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) rlc_chip_name = "BTC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) mc_req_size = BTC_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) case CHIP_CAYMAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) chip_name = "CAYMAN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) rlc_chip_name = "CAYMAN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) case CHIP_ARUBA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) chip_name = "ARUBA";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) rlc_chip_name = "ARUBA";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* pfp/me same size as CAYMAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) mc_req_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) DRM_INFO("Loading %s Microcode\n", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (rdev->pfp_fw->size != pfp_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) rdev->pfp_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (rdev->me_fw->size != me_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) rdev->me_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (rdev->rlc_fw->size != rlc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) pr_err("ni_rlc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) rdev->rlc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* no MC ucode on TN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (!(rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (rdev->mc_fw->size != mc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) rdev->mc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) pr_err("smc: error loading firmware \"%s\"\n", fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) release_firmware(rdev->smc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) rdev->smc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) } else if (rdev->smc_fw->size != smc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) rdev->mc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (err != -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) pr_err("ni_cp: Failed to load firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) release_firmware(rdev->pfp_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) rdev->pfp_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) release_firmware(rdev->me_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) rdev->me_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) release_firmware(rdev->rlc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) rdev->rlc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) release_firmware(rdev->mc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) rdev->mc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * cayman_get_allowed_info_register - fetch the register for the info ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * @reg: register offset in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * @val: register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * Returns 0 for success or -EINVAL for an invalid register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) int cayman_get_allowed_info_register(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) u32 reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) case GRBM_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) case GRBM_STATUS_SE0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) case GRBM_STATUS_SE1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) case SRBM_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) case SRBM_STATUS2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case UVD_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *val = RREG32(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int tn_get_temp(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int actual_temp = (temp / 8) - 49;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return actual_temp * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * Core functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static void cayman_gpu_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) u32 gb_addr_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) u32 mc_shared_chmap, mc_arb_ramcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) u32 cgts_tcc_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) u32 sx_debug_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u32 smx_dc_ctl0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) u32 cgts_sm_ctrl_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) u32 hdp_host_path_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) u32 disabled_rb_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) case CHIP_CAYMAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rdev->config.cayman.max_shader_engines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) rdev->config.cayman.max_pipes_per_simd = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) rdev->config.cayman.max_tile_pipes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) rdev->config.cayman.max_simds_per_se = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) rdev->config.cayman.max_backends_per_se = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) rdev->config.cayman.max_texture_channel_caches = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rdev->config.cayman.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) rdev->config.cayman.max_threads = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) rdev->config.cayman.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) rdev->config.cayman.max_stack_entries = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) rdev->config.cayman.sx_num_of_sets = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) rdev->config.cayman.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) rdev->config.cayman.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) rdev->config.cayman.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) rdev->config.cayman.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rdev->config.cayman.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rdev->config.cayman.sc_prim_fifo_size = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) case CHIP_ARUBA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) rdev->config.cayman.max_shader_engines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) rdev->config.cayman.max_pipes_per_simd = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) rdev->config.cayman.max_tile_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if ((rdev->pdev->device == 0x9900) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) (rdev->pdev->device == 0x9901) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) (rdev->pdev->device == 0x9905) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) (rdev->pdev->device == 0x9906) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) (rdev->pdev->device == 0x9907) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) (rdev->pdev->device == 0x9908) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) (rdev->pdev->device == 0x9909) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) (rdev->pdev->device == 0x990B) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) (rdev->pdev->device == 0x990C) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) (rdev->pdev->device == 0x990F) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) (rdev->pdev->device == 0x9910) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) (rdev->pdev->device == 0x9917) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) (rdev->pdev->device == 0x9999) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) (rdev->pdev->device == 0x999C)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) rdev->config.cayman.max_simds_per_se = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) rdev->config.cayman.max_backends_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) rdev->config.cayman.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) rdev->config.cayman.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) rdev->config.cayman.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) rdev->config.cayman.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) } else if ((rdev->pdev->device == 0x9903) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) (rdev->pdev->device == 0x9904) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) (rdev->pdev->device == 0x990A) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) (rdev->pdev->device == 0x990D) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) (rdev->pdev->device == 0x990E) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) (rdev->pdev->device == 0x9913) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) (rdev->pdev->device == 0x9918) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) (rdev->pdev->device == 0x999D)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rdev->config.cayman.max_simds_per_se = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) rdev->config.cayman.max_backends_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) rdev->config.cayman.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) rdev->config.cayman.sx_max_export_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) rdev->config.cayman.sx_max_export_pos_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) rdev->config.cayman.sx_max_export_smx_size = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) } else if ((rdev->pdev->device == 0x9919) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) (rdev->pdev->device == 0x9990) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) (rdev->pdev->device == 0x9991) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) (rdev->pdev->device == 0x9994) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) (rdev->pdev->device == 0x9995) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) (rdev->pdev->device == 0x9996) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) (rdev->pdev->device == 0x999A) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) (rdev->pdev->device == 0x99A0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) rdev->config.cayman.max_simds_per_se = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) rdev->config.cayman.max_backends_per_se = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) rdev->config.cayman.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) rdev->config.cayman.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rdev->config.cayman.sx_max_export_pos_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) rdev->config.cayman.sx_max_export_smx_size = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rdev->config.cayman.max_simds_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) rdev->config.cayman.max_backends_per_se = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) rdev->config.cayman.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rdev->config.cayman.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) rdev->config.cayman.sx_max_export_pos_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) rdev->config.cayman.sx_max_export_smx_size = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) rdev->config.cayman.max_texture_channel_caches = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rdev->config.cayman.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rdev->config.cayman.max_threads = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) rdev->config.cayman.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) rdev->config.cayman.max_stack_entries = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rdev->config.cayman.sx_num_of_sets = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) rdev->config.cayman.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) rdev->config.cayman.sc_prim_fifo_size = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) WREG32(SRBM_INT_CNTL, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) WREG32(SRBM_INT_ACK, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) evergreen_fix_pci_max_read_req_size(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (rdev->config.cayman.mem_row_size_in_kb > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) rdev->config.cayman.mem_row_size_in_kb = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* XXX use MC settings? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rdev->config.cayman.shader_engine_tile_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rdev->config.cayman.num_gpus = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rdev->config.cayman.multi_gpu_tile_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) rdev->config.cayman.num_tile_pipes = (1 << tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) rdev->config.cayman.num_shader_engines = tmp + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) rdev->config.cayman.num_gpus = tmp + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /* setup tiling info dword. gb_addr_config is not adequate since it does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * not have bank info, so create a custom tiling dword.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * bits 3:0 num_pipes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * bits 7:4 num_banks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * bits 11:8 group_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * bits 15:12 row_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) rdev->config.cayman.tile_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) switch (rdev->config.cayman.num_tile_pipes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) rdev->config.cayman.tile_config |= (0 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) rdev->config.cayman.tile_config |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) rdev->config.cayman.tile_config |= (2 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) rdev->config.cayman.tile_config |= (3 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) rdev->config.cayman.tile_config |= 1 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) case 0: /* four banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) rdev->config.cayman.tile_config |= 0 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) case 1: /* eight banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) rdev->config.cayman.tile_config |= 1 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) case 2: /* sixteen banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) rdev->config.cayman.tile_config |= 2 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rdev->config.cayman.tile_config |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) rdev->config.cayman.tile_config |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) u32 rb_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) tmp <<= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) tmp |= rb_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* enabled rb are just the one not disabled :) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) disabled_rb_mask = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) tmp |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /* if all the backends are disabled, fix it up here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if ((disabled_rb_mask & tmp) == tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) disabled_rb_mask &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) u32 simd_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) tmp <<= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) tmp |= simd_disable_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) rdev->config.cayman.active_simds = hweight32(~tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) WREG32(GB_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (ASIC_IS_DCE6(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) WREG32(DMIF_ADDR_CALC, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) WREG32(HDP_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if ((rdev->config.cayman.max_backends_per_se == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) (rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if ((disabled_rb_mask & 3) == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* RB1 disabled, RB0 enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) tmp = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* RB0 disabled, RB1 enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) tmp = 0x11111111;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) tmp = gb_addr_config & NUM_PIPES_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) tmp = r6xx_remap_render_backend(rdev, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) rdev->config.cayman.max_backends_per_se *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rdev->config.cayman.max_shader_engines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) CAYMAN_MAX_BACKENDS, disabled_rb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) rdev->config.cayman.backend_map = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) WREG32(GB_BACKEND_MAP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) cgts_tcc_disable = 0xffff0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) cgts_tcc_disable &= ~(1 << (16 + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* reprogram the shader complex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* set HW defaults for 3D engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) sx_debug_1 = RREG32(SX_DEBUG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) WREG32(SX_DEBUG_1, sx_debug_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) WREG32(SMX_DC_CTL0, smx_dc_ctl0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* need to be explicitly zero-ed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) WREG32(VGT_OFFCHIP_LDS_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) WREG32(SQ_LSTMP_RING_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) WREG32(SQ_HSTMP_RING_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) WREG32(SQ_ESTMP_RING_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) WREG32(SQ_GSTMP_RING_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) WREG32(SQ_VSTMP_RING_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) WREG32(SQ_PSTMP_RING_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) WREG32(VGT_NUM_INSTANCES, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) WREG32(CP_PERFMON_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) FETCH_FIFO_HIWATER(0x4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) DONE_FIFO_HIWATER(0xe0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ALU_UPDATE_FIFO_HIWATER(0x8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) WREG32(SQ_CONFIG, (VC_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) EXPORT_SRC_C |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) GFX_PRIO(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) CS1_PRIO(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) CS2_PRIO(1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) FORCE_EOV_MAX_REZ_CNT(255)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) AUTO_INVLD_EN(ES_AND_GS_AUTO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) WREG32(VGT_GS_VERTEX_REUSE, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) WREG32(CB_PERF_CTR0_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) WREG32(CB_PERF_CTR0_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) WREG32(CB_PERF_CTR1_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) WREG32(CB_PERF_CTR1_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) WREG32(CB_PERF_CTR2_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) WREG32(CB_PERF_CTR2_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) WREG32(CB_PERF_CTR3_SEL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) WREG32(CB_PERF_CTR3_SEL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) tmp = RREG32(HDP_MISC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) tmp |= HDP_FLUSH_INVALIDATE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) WREG32(HDP_MISC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* set clockgating golden values on TN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (rdev->family == CHIP_ARUBA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) tmp = RREG32_CG(CG_CGTT_LOCAL_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) tmp &= ~0x00380000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) WREG32_CG(CG_CGTT_LOCAL_0, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) tmp = RREG32_CG(CG_CGTT_LOCAL_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) tmp &= ~0x0e000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) WREG32_CG(CG_CGTT_LOCAL_1, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * GART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* flush hdp cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* bits 0-7 are the VM contexts0-7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) WREG32(VM_INVALIDATE_REQUEST, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
/*
 * cayman_pcie_gart_enable - bring up the PCIE GART and VM page tables
 *
 * Pins the GART page table in VRAM, programs the MC L1 TLB and VM L2
 * cache controls, points VM context 0 at the system GART mapping,
 * restores contexts 1-7 (the per-process VMs managed in radeon_gart.c),
 * and finally flushes the TLBs so the new translations take effect.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	/* the page table object must have been created during GART init */
	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* keep the table resident in VRAM while the GART is live */
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	/* NOTE(review): the (0xA << 7) field has no symbolic name here -
	 * presumably an arbitration/latency setting; confirm against the
	 * register spec before changing. */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	/* drop any stale translations before enabling the contexts */
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(6) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	/* context 0 covers the GTT aperture with a flat (depth 0) table */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	/* faults in context 0 are redirected to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	/* NOTE(review): 0x15D4-0x15DC have no symbolic names in this file;
	 * they are cleared here during GART bring-up - purpose unconfirmed. */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		/* context N registers sit at a 4-byte stride from context 0 */
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
			rdev->vm_manager.max_pfn - 1);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	/* NOTE(review): raw value 4 written to VM_CONTEXT1_CNTL2 - presumably
	 * an invalidation/response-mode field; confirm against register spec. */
	WREG32(VM_CONTEXT1_CNTL2, 4);
	/* two-level tables for the VM contexts, with all fault classes
	 * raising an interrupt and falling back to the dummy page */
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
/*
 * cayman_pcie_gart_disable - tear down the PCIE GART
 *
 * Saves the page table base of VM contexts 1-7 (so _enable can restore
 * them after a suspend/reset cycle), disables all VM contexts and the
 * L1 TLB, reprograms the L2 cache to its inactive configuration, and
 * unpins the GART table from VRAM.
 */
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	/* remember where each VM context pointed before we shut it off */
	for (i = 1; i < 8; ++i) {
		rdev->vm_manager.saved_table_addr[i] = RREG32(
			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	/* note: ENABLE_L1_TLB is deliberately absent here */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	/* ENABLE_L2_CACHE is likewise omitted: L2 is left disabled */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* the table may move in VRAM now that the hardware stopped using it */
	radeon_gart_table_vram_unpin(rdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
/*
 * cayman_pcie_gart_fini - final GART teardown
 *
 * Disables the hardware first, then frees the VRAM-resident page table
 * and the GART bookkeeping; this call order must not change.
 */
static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
/*
 * cayman_cp_int_cntl_setup - program CP_INT_CNTL for one CP ring
 * @ring: ring id selected via SRBM_GFX_CNTL
 * @cp_int_cntl: value to write to the ring's CP_INT_CNTL
 *
 * CP_INT_CNTL is banked per ring; the SRBM_GFX_CNTL write selects
 * which bank the following register access hits, so the two writes
 * must stay in this order.
 */
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	WREG32(SRBM_GFX_CNTL, RINGID(ring));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * CP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
/*
 * cayman_fence_ring_emit - emit a fence on a CP ring
 *
 * Emits a SURFACE_SYNC to flush the read caches, then an
 * EVENT_WRITE_EOP that writes the fence sequence number to the
 * fence driver's GPU address and raises an interrupt.  The dword
 * order of each packet is fixed by the PM4 format.
 */
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);	/* sync full address range */
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	/* NOTE(review): DATA_SEL(1)/INT_SEL(2) presumably select "write
	 * 32-bit data" and "interrupt on completion" - confirm in PM4 spec */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
/*
 * cayman_ring_ib_execute - schedule an indirect buffer on a CP ring
 *
 * Switches the CP into DX10/11 mode, optionally saves the predicted
 * read pointer to a scratch register, emits the INDIRECT_BUFFER packet
 * pointing at the IB, and flushes the read caches for the IB's vmid.
 */
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	/* vmid 0 is the system (GART) context when the IB has no VM */
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		/* 3 (this packet) + 4 (IB packet) + 8 (surface sync + pad)
		 * dwords ahead of the current write pointer */
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |	/* tell the CP to byte-swap the IB */
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) WREG32(CP_ME_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) WREG32(SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) u32 rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) rptr = rdev->wb.wb[ring->rptr_offs/4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) rptr = RREG32(CP_RB0_RPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) rptr = RREG32(CP_RB1_RPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) rptr = RREG32(CP_RB2_RPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) u32 wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) wptr = RREG32(CP_RB0_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) wptr = RREG32(CP_RB1_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) wptr = RREG32(CP_RB2_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) void cayman_gfx_set_wptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) WREG32(CP_RB0_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) (void)RREG32(CP_RB0_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) WREG32(CP_RB1_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) (void)RREG32(CP_RB1_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) WREG32(CP_RB2_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) (void)RREG32(CP_RB2_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int cayman_cp_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (!rdev->me_fw || !rdev->pfp_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) cayman_cp_enable(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) fw_data = (const __be32 *)rdev->pfp_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) fw_data = (const __be32 *)rdev->me_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) WREG32(CP_ME_RAM_RADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
/*
 * cayman_cp_start - bootstrap the graphics command processor
 *
 * Emits the ME_INITIALIZE handshake on the gfx ring, enables the CP,
 * then emits the golden register state (clear-state preamble plus the
 * cayman_default_state table) so the hardware starts from a known
 * context.  Only the gfx ring (cp0) is initialized here.
 *
 * Returns 0 on success, negative error code on ring lock failure.
 */
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* 7 dwords: ME_INITIALIZE header + 6 payload dwords below */
	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	/* highest hardware context index the ME may use */
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cayman_cp_enable(rdev, true);

	/* default state table plus 19 dwords of framing packets below */
	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	/* NOTE(review): the 0xc00.. dwords below are raw PM4 headers with
	 * no symbolic encoding in this file - confirm against the PM4 spec
	 * before modifying. */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* */

	radeon_ring_unlock_commit(rdev, ring, false);

	/* XXX init other rings */

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
/*
 * cayman_cp_fini - shut down the graphics CP and free its ring
 *
 * Halts the CP before tearing down the gfx ring buffer, then releases
 * the scratch register used for read-pointer save/restore.
 */
static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
/*
 * cayman_cp_resume - (re)initialize all three CP ring buffers
 *
 * Soft-resets the CP and dependent graphics blocks, programs ring
 * buffer size, writeback addresses, base addresses and read/write
 * pointers for cp0/cp1/cp2, starts the CP via cayman_cp_start(), and
 * ring-tests cp0.  cp1/cp2 are left not-ready (compute rings are
 * brought up separately).
 *
 * Returns 0 on success, negative error code on failure.
 */
static int cayman_cp_resume(struct radeon_device *rdev)
{
	/* per-ring register tables, indexed 0..2 for cp0/cp1/cp2 */
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	static const unsigned cp_rb_rptr[] = {
		CP_RB0_RPTR,
		CP_RB1_RPTR,
		CP_RB2_RPTR
	};
	static const unsigned cp_rb_wptr[] = {
		CP_RB0_WPTR,
		CP_RB1_WPTR,
		CP_RB2_WPTR
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	/* read back to flush the posted write, then let the reset settle */
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* NOTE(review): CP_DEBUG bit 27 is set without a symbolic name -
	 * purpose unconfirmed from this file. */
	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = order_base_2(ring->ring_size / 8);
		/* wptr granularity in the high byte */
		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		/* temporarily allow CPU writes to RPTR so it can be zeroed */
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->wptr = 0;
		WREG32(cp_rb_rptr[i], 0);
		WREG32(cp_rb_wptr[i], ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only test cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	/* once the CP can move data, TTM may use all of VRAM */
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) u32 reset_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /* GRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) tmp = RREG32(GRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (tmp & (PA_BUSY | SC_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) SH_BUSY | SX_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) TA_BUSY | VGT_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) DB_BUSY | CB_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) GDS_BUSY | SPI_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) IA_BUSY | IA_BUSY_NO_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) reset_mask |= RADEON_RESET_GFX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) CP_BUSY | CP_COHERENCY_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) reset_mask |= RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (tmp & GRBM_EE_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /* DMA_STATUS_REG 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (!(tmp & DMA_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /* DMA_STATUS_REG 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!(tmp & DMA_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) reset_mask |= RADEON_RESET_DMA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /* SRBM_STATUS2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) tmp = RREG32(SRBM_STATUS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (tmp & DMA_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (tmp & DMA1_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) reset_mask |= RADEON_RESET_DMA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /* SRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) tmp = RREG32(SRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) reset_mask |= RADEON_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (tmp & IH_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) reset_mask |= RADEON_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) if (tmp & SEM_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) reset_mask |= RADEON_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (tmp & GRBM_RQ_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) reset_mask |= RADEON_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (tmp & VMC_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) MCC_BUSY | MCD_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) reset_mask |= RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (evergreen_is_display_hung(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) reset_mask |= RADEON_RESET_DISPLAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /* VM_L2_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) tmp = RREG32(VM_L2_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (tmp & L2_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /* Skip MC reset as it's mostly likely not hung, just busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (reset_mask & RADEON_RESET_MC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) reset_mask &= ~RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (reset_mask == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) evergreen_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) RREG32(0x14F8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) RREG32(0x14D8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) RREG32(0x14FC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) RREG32(0x14DC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (reset_mask & RADEON_RESET_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* dma0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (reset_mask & RADEON_RESET_DMA1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* dma1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) grbm_soft_reset = SOFT_RESET_CB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) SOFT_RESET_DB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) SOFT_RESET_GDS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) SOFT_RESET_PA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) SOFT_RESET_SC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) SOFT_RESET_SPI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) SOFT_RESET_SH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) SOFT_RESET_SX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) SOFT_RESET_TC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) SOFT_RESET_TA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) SOFT_RESET_VGT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) SOFT_RESET_IA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (reset_mask & RADEON_RESET_CP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) srbm_soft_reset |= SOFT_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (reset_mask & RADEON_RESET_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) srbm_soft_reset |= SOFT_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (reset_mask & RADEON_RESET_DMA1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) srbm_soft_reset |= SOFT_RESET_DMA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (reset_mask & RADEON_RESET_DISPLAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) srbm_soft_reset |= SOFT_RESET_DC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (reset_mask & RADEON_RESET_RLC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) srbm_soft_reset |= SOFT_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (reset_mask & RADEON_RESET_SEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) srbm_soft_reset |= SOFT_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (reset_mask & RADEON_RESET_IH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) srbm_soft_reset |= SOFT_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (reset_mask & RADEON_RESET_GRBM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) srbm_soft_reset |= SOFT_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (reset_mask & RADEON_RESET_VMC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) srbm_soft_reset |= SOFT_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (!(rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (reset_mask & RADEON_RESET_MC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) srbm_soft_reset |= SOFT_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (grbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) tmp |= grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) tmp &= ~grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (srbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) tmp |= srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) tmp &= ~srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /* Wait a little for things to settle down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) evergreen_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) evergreen_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) int cayman_asic_reset(struct radeon_device *rdev, bool hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) u32 reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (hard) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) evergreen_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) reset_mask = cayman_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) r600_set_bios_scratch_engine_hung(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) cayman_gpu_soft_reset(rdev, reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) reset_mask = cayman_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) evergreen_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) r600_set_bios_scratch_engine_hung(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * cayman_gfx_is_lockup - Check if the GFX engine is locked up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * @ring: radeon_ring structure holding ring information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * Check if the GFX engine is locked up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * Returns true if the engine appears to be locked up, false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (!(reset_mask & (RADEON_RESET_GFX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) RADEON_RESET_COMPUTE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) RADEON_RESET_CP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) radeon_ring_lockup_update(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return radeon_ring_test_lockup(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static void cayman_uvd_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) r = radeon_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * At this point rdev->uvd.vcpu_bo is NULL which trickles down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) * to early fails uvd_v2_2_resume() and thus nothing happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) * there. So it is pointless to try to go through that code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * hence why we disable uvd here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) rdev->has_uvd = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) static void cayman_uvd_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) r = uvd_v2_2_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static void cayman_uvd_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) r = uvd_v1_0_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static void cayman_vce_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* Only set for CHIP_ARUBA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (!rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) r = radeon_vce_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * At this point rdev->vce.vcpu_bo is NULL which trickles down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * to early fails cayman_vce_start() and thus nothing happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * there. So it is pointless to try to go through that code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * hence why we disable vce here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) rdev->has_vce = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) static void cayman_vce_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (!rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) r = radeon_vce_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) r = vce_v1_0_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) static void cayman_vce_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) r = vce_v1_0_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static int cayman_startup(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) /* enable pcie gen2 link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) evergreen_pcie_gen2_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /* enable aspm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) evergreen_program_aspm(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /* scratch needs to be initialized before MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) r = r600_vram_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) evergreen_mc_program(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) r = ni_mc_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) DRM_ERROR("Failed to load MC firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) r = cayman_pcie_gart_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) cayman_gpu_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) /* allocate rlc buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) rdev->rlc.reg_list_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) rdev->rlc.cs_data = cayman_cs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) r = sumo_rlc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) DRM_ERROR("Failed to init rlc BOs!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) /* allocate wb buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) r = radeon_wb_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) cayman_uvd_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) cayman_vce_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) /* Enable IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) r = radeon_irq_kms_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) r = r600_irq_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) DRM_ERROR("radeon: IH init failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) evergreen_irq_set(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) r = cayman_cp_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) r = cayman_cp_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) r = cayman_dma_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) cayman_uvd_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) cayman_vce_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) r = radeon_ib_pool_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) r = radeon_vm_manager_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) r = radeon_audio_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) int cayman_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) * posting will perform necessary task to bring back GPU into good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * shape.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /* post card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) /* init golden registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) ni_init_golden_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (rdev->pm.pm_method == PM_METHOD_DPM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) radeon_pm_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) r = cayman_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) DRM_ERROR("cayman startup failed on resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) int cayman_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) radeon_pm_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) radeon_audio_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) radeon_vm_manager_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) cayman_cp_enable(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) cayman_dma_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) radeon_uvd_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) evergreen_irq_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) radeon_wb_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) cayman_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) /* Plan is to move initialization in that function and use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * helper function so that radeon_device_init pretty much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * do nothing more than calling asic specific function. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * should also allow to remove a bunch of callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * like vram_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) int cayman_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* Read BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (!radeon_get_bios(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (ASIC_IS_AVIVO(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) /* Must be an ATOMBIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (!rdev->is_atom_bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) r = radeon_atombios_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /* Post card if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (!radeon_card_posted(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (!rdev->bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) DRM_INFO("GPU not posted. posting now...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /* init golden registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) ni_init_golden_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) /* Initialize scratch registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) r600_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /* Initialize surface registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) radeon_surface_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Initialize clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) radeon_get_clock_info(rdev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /* Fence driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) r = radeon_fence_driver_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /* initialize memory controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) r = evergreen_mc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) /* Memory manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) r = radeon_bo_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) r = ni_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) r = ni_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) /* Initialize power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) radeon_pm_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) r600_ring_init(rdev, ring, 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) r600_ring_init(rdev, ring, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) r600_ring_init(rdev, ring, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) cayman_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) cayman_vce_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) rdev->ih.ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) r600_ih_ring_init(rdev, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) r = r600_pcie_gart_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) r = cayman_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) dev_err(rdev->dev, "disabling GPU acceleration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) cayman_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) cayman_dma_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) r600_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) radeon_vm_manager_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) cayman_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) /* Don't start up if the MC ucode is missing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) * The default clocks and voltages before the MC ucode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) * is loaded are not suffient for advanced operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) * We can skip this check for TN, because there is no MC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) * ucode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) DRM_ERROR("radeon: MC ucode required for NI+.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) void cayman_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) radeon_pm_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) cayman_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) cayman_dma_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) r600_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) radeon_vm_manager_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) radeon_uvd_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) radeon_vce_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) cayman_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) r600_vram_scratch_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) radeon_gem_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) radeon_fence_driver_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) radeon_bo_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) radeon_atombios_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) kfree(rdev->bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) rdev->bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) * vm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) int cayman_vm_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) /* number of VMs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) rdev->vm_manager.nvm = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) /* base offset of vram pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) tmp <<= 22;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) rdev->vm_manager.vram_base_offset = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) rdev->vm_manager.vram_base_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) void cayman_vm_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * cayman_vm_decode_fault - print human readable fault info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * Print human readable fault information (cayman/TN).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) void cayman_vm_decode_fault(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) u32 status, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) char *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) switch (mc_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) case 96:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) case 80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) case 160:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) case 144:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) case 224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) case 208:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) block = "CB";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) case 33:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) case 17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) case 97:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) case 81:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) case 161:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) case 145:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) case 225:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) case 209:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) block = "CB_FMASK";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) case 34:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) case 18:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) case 98:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) case 82:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) case 162:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) case 146:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) case 226:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) case 210:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) block = "CB_CMASK";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) case 35:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) case 19:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) case 99:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) case 83:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) case 163:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) case 147:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) case 227:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) case 211:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) block = "CB_IMMED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) case 36:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) case 20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) case 100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) case 84:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) case 164:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) case 148:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) case 228:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) case 212:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) block = "DB";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) case 37:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) case 21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) case 101:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) case 85:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) case 165:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) case 149:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) case 229:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) case 213:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) block = "DB_HTILE";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) case 38:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) case 22:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) case 102:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) case 86:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) case 166:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) case 150:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) case 230:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) case 214:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) block = "SX";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) case 39:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) case 23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) case 103:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) case 87:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) case 167:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) case 151:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) case 231:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) case 215:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) block = "DB_STEN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) case 40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) case 104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) case 88:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) case 232:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) case 216:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) case 168:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) case 152:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) block = "TC_TFETCH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) case 41:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) case 25:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) case 105:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) case 89:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) case 233:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) case 217:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) case 169:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) case 153:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) block = "TC_VFETCH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) case 42:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) case 26:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) case 106:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) case 90:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) case 234:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) case 218:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) case 170:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) case 154:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) block = "VC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) case 112:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) block = "CP";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) case 113:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) case 114:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) block = "SH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) case 115:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) block = "VGT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) case 178:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) block = "IH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) case 51:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) block = "RLC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) case 55:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) block = "DMA";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) case 56:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) block = "HDP";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) block = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) protections, vmid, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) block, mc_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * cayman_vm_flush - vm flush using the CP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * Update the page table base and flush the VM TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) * using the CP (cayman-si).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) unsigned vm_id, uint64_t pd_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) radeon_ring_write(ring, pd_addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* flush hdp cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) radeon_ring_write(ring, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) /* bits 0-7 are the VM contexts0-7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) radeon_ring_write(ring, 1 << vm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) /* wait for the invalidate to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) WAIT_REG_MEM_ENGINE(0))); /* me */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) radeon_ring_write(ring, 0); /* ref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) radeon_ring_write(ring, 0); /* mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) radeon_ring_write(ring, 0x20); /* poll interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) /* sync PFP to ME, otherwise we might get invalid PFP reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) radeon_ring_write(ring, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct atom_clock_dividers dividers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) int r, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) ecclk, false, ÷rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (i == 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (i == 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }