/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"

#include "r100d.h"
#include "r200_reg_safe.h"

#include "r100_track.h"

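/*
 * Compute the number of dwords occupied by one vertex for the layout
 * described by the SE_VTX_FMT_0 word (position, blend weights, normal,
 * point size, fog, shininess and colour channels).  The CS checker uses
 * this to validate draw commands against the bound vertex buffers.
 */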
static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
{
	int vtx_size, i;
	vtx_size = 2;

	if (vtx_fmt_0 & R200_VTX_Z0)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_W0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
		vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
	if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_N0)
		vtx_size += 3;
	if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_SHININESS_0)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_SHININESS_1)
		vtx_size++;
	for (i = 0; i < 8; i++) {
		int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
		switch (color_size) {
		case 0: break;
		case 1: vtx_size++; break;
		case 2: vtx_size += 3; break;
		case 3: vtx_size += 4; break;
		}
	}
	if (vtx_fmt_0 & R200_VTX_XY1)
		vtx_size += 2;
	if (vtx_fmt_0 & R200_VTX_Z1)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_W1)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_N1)
		vtx_size += 3;
	return vtx_size;
}

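/*
 * Schedule a DMA copy of @num_gpu_pages GPU pages from @src_offset to
 * @dst_offset on the GFX ring, splitting the transfer into chunks that
 * fit a single DMA packet.  Returns the fence emitted for the copy, or
 * an ERR_PTR() on failure.
 */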
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset,
				   uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct dma_resv *resv)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	struct radeon_fence *fence;
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
	/* each DMA packet can move at most 0x1FFFFF bytes */
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return ERR_PTR(r);
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(ring, PACKET0(0x720, 2));
		radeon_ring_write(ring, src_offset);
		radeon_ring_write(ring, dst_offset);
		radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;
}

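/*
 * Compute the number of dwords per vertex contributed by the texture
 * coordinate sets described by the SE_VTX_FMT_1 word.
 */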
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
	int vtx_size, i, tex_size;
	vtx_size = 0;
	for (i = 0; i < 6; i++) {
		tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
		if (tex_size > 4)
			continue;
		vtx_size += tex_size;
	}
	return vtx_size;
}

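/*
 * Validate a single register write from a PACKET0 command: patch buffer
 * offsets with their relocations, reject writes the kernel cannot allow,
 * and record colour/depth buffer, texture and vertex format state for
 * the later CS track check.
 */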
int r200_packet0_check(struct radeon_cs_parser *p,
		       struct radeon_cs_packet *pkt,
		       unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i;
	int face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);
	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	/* FIXME: only allow PACKET3 blit? easier to check for out of
	 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R200_PP_TXOFFSET_0:
	case R200_PP_TXOFFSET_1:
	case R200_PP_TXOFFSET_2:
	case R200_PP_TXOFFSET_3:
	case R200_PP_TXOFFSET_4:
	case R200_PP_TXOFFSET_5:
		i = (reg - R200_PP_TXOFFSET_0) / 24;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R200_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R200_TXO_MICRO_TILE;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case R200_PP_CUBIC_OFFSET_F1_0:
	case R200_PP_CUBIC_OFFSET_F2_0:
	case R200_PP_CUBIC_OFFSET_F3_0:
	case R200_PP_CUBIC_OFFSET_F4_0:
	case R200_PP_CUBIC_OFFSET_F5_0:
	case R200_PP_CUBIC_OFFSET_F1_1:
	case R200_PP_CUBIC_OFFSET_F2_1:
	case R200_PP_CUBIC_OFFSET_F3_1:
	case R200_PP_CUBIC_OFFSET_F4_1:
	case R200_PP_CUBIC_OFFSET_F5_1:
	case R200_PP_CUBIC_OFFSET_F1_2:
	case R200_PP_CUBIC_OFFSET_F2_2:
	case R200_PP_CUBIC_OFFSET_F3_2:
	case R200_PP_CUBIC_OFFSET_F4_2:
	case R200_PP_CUBIC_OFFSET_F5_2:
	case R200_PP_CUBIC_OFFSET_F1_3:
	case R200_PP_CUBIC_OFFSET_F2_3:
	case R200_PP_CUBIC_OFFSET_F3_3:
	case R200_PP_CUBIC_OFFSET_F4_3:
	case R200_PP_CUBIC_OFFSET_F5_3:
	case R200_PP_CUBIC_OFFSET_F1_4:
	case R200_PP_CUBIC_OFFSET_F2_4:
	case R200_PP_CUBIC_OFFSET_F3_4:
	case R200_PP_CUBIC_OFFSET_F4_4:
	case R200_PP_CUBIC_OFFSET_F5_4:
	case R200_PP_CUBIC_OFFSET_F1_5:
	case R200_PP_CUBIC_OFFSET_F2_5:
	case R200_PP_CUBIC_OFFSET_F3_5:
	case R200_PP_CUBIC_OFFSET_F4_5:
	case R200_PP_CUBIC_OFFSET_F5_5:
		i = (reg - R200_PP_TXOFFSET_0) / 24;
		face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[i].cube_info[face - 1].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].cube_info[face - 1].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
			DRM_ERROR("No support for depth xy offset in kms\n");
			return -EINVAL;
		}

		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case 0x210c:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case R200_SE_VTX_FMT_0:
		track->vtx_size = r200_get_vtx_size_0(idx_value);
		break;
	case R200_SE_VTX_FMT_1:
		track->vtx_size += r200_get_vtx_size_1(idx_value);
		break;
	case R200_PP_TXSIZE_0:
	case R200_PP_TXSIZE_1:
	case R200_PP_TXSIZE_2:
	case R200_PP_TXSIZE_3:
	case R200_PP_TXSIZE_4:
	case R200_PP_TXSIZE_5:
		i = (reg - R200_PP_TXSIZE_0) / 32;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case R200_PP_TXPITCH_0:
	case R200_PP_TXPITCH_1:
	case R200_PP_TXPITCH_2:
	case R200_PP_TXPITCH_3:
	case R200_PP_TXPITCH_4:
	case R200_PP_TXPITCH_5:
		i = (reg - R200_PP_TXPITCH_0) / 32;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case R200_PP_TXFILTER_0:
	case R200_PP_TXFILTER_1:
	case R200_PP_TXFILTER_2:
	case R200_PP_TXFILTER_3:
	case R200_PP_TXFILTER_4:
	case R200_PP_TXFILTER_5:
		i = (reg - R200_PP_TXFILTER_0) / 32;
		track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
						 >> R200_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
		break;
	case R200_PP_TXMULTI_CTL_0:
	case R200_PP_TXMULTI_CTL_1:
	case R200_PP_TXMULTI_CTL_2:
	case R200_PP_TXMULTI_CTL_3:
	case R200_PP_TXMULTI_CTL_4:
	case R200_PP_TXMULTI_CTL_5:
		i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
		break;
	case R200_PP_TXFORMAT_X_0:
	case R200_PP_TXFORMAT_X_1:
	case R200_PP_TXFORMAT_X_2:
	case R200_PP_TXFORMAT_X_3:
	case R200_PP_TXFORMAT_X_4:
	case R200_PP_TXFORMAT_X_5:
		i = (reg - R200_PP_TXFORMAT_X_0) / 32;
		track->textures[i].txdepth = idx_value & 0x7;
		tmp = (idx_value >> 16) & 0x3;
		/* 2D, 3D, CUBE */
		switch (tmp) {
		case 0:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			/* 1D/2D */
			track->textures[i].tex_coord_type = 0;
			break;
		case 1:
			/* CUBE */
			track->textures[i].tex_coord_type = 2;
			break;
		case 2:
			/* 3D */
			track->textures[i].tex_coord_type = 1;
			break;
		}
		track->tex_dirty = true;
		break;
	case R200_PP_TXFORMAT_0:
	case R200_PP_TXFORMAT_1:
	case R200_PP_TXFORMAT_2:
	case R200_PP_TXFORMAT_3:
	case R200_PP_TXFORMAT_4:
	case R200_PP_TXFORMAT_5:
		i = (reg - R200_PP_TXFORMAT_0) / 32;
		if (idx_value & R200_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
			track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
		}
		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
			track->textures[i].lookup_disable = true;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case R200_TXFORMAT_I8:
		case R200_TXFORMAT_RGB332:
		case R200_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R200_TXFORMAT_AI88:
		case R200_TXFORMAT_ARGB1555:
		case R200_TXFORMAT_RGB565:
		case R200_TXFORMAT_ARGB4444:
		case R200_TXFORMAT_VYUY422:
		case R200_TXFORMAT_YVYU422:
		case R200_TXFORMAT_LDVDU655:
		case R200_TXFORMAT_DVDU88:
		case R200_TXFORMAT_AVYU4444:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R200_TXFORMAT_ARGB8888:
		case R200_TXFORMAT_RGBA8888:
		case R200_TXFORMAT_ABGR8888:
		case R200_TXFORMAT_BGR111110:
		case R200_TXFORMAT_LDVDU8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R200_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R200_TXFORMAT_DXT23:
		case R200_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
		break;
	case R200_PP_CUBIC_FACES_0:
	case R200_PP_CUBIC_FACES_1:
	case R200_PP_CUBIC_FACES_2:
	case R200_PP_CUBIC_FACES_3:
	case R200_PP_CUBIC_FACES_4:
	case R200_PP_CUBIC_FACES_5:
		tmp = idx_value;
		i = (reg - R200_PP_CUBIC_FACES_0) / 32;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		track->tex_dirty = true;
		break;
	default:
		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

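/*
 * Install the generated r200 safe-register bitmap (r200_reg_safe.h) so
 * that the CS checker validates packet0 register writes against it.
 */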
void r200_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
	rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
}