/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"

#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   through MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */
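
/* The ring-based workaround can be seen in r300_fence_ring_emit() below:
 * the HDP read cache invalidate (HOST_PATH_CNTL with
 * RADEON_HDP_READ_BUFFER_INVALIDATE) is emitted as ring packets rather
 * than written through MMIO. */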

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); /* read back to post the write */
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
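	/* PTE layout (an assumption, using names from later radeon headers):
	 * bits 31:24 hold system address bits 39:32, bits 23:4 hold address
	 * bits 31:12, and the low nibble carries flag bits; the 0xc below
	 * looks like RV370_PTE_READABLE | RV370_PTE_WRITEABLE. */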
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM, so there is
	 * no need for cpu_to_le32 on VRAM tables. */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; /* one 32-bit PTE per page */
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error (0x18 appears to be RADEON_PCIE_TX_GART_ERROR, the
	 * register dumped by the debugfs helper below) */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
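
/* A note on the raw register offsets used in r300_fence_ring_emit() below
 * (names are assumptions, taken from later radeon register headers):
 * 0x43E0/0x43E4 are the SC scissor TL/BR registers, 0x4E4C is
 * R300_RB3D_DSTCACHE_CTLSTAT (2 = DC flush), 0x4F18 is the ZB cache
 * ctl/stat register (1 = ZC flush), and 0x1720 is RADEON_WAIT_UNTIL. */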

void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are IB schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
	radeon_ring_write(rdev, (2 << 0));
	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
	radeon_ring_write(rdev, (1 << 0));
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


#if 0


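/* Dead (#if 0) GUI-DMA copy path, kept for reference. The 0x720 offset is
 * written with a src/dst/size register triplet via PACKET0(0x720, 2), and
 * 0x1FFFFF looks like the largest byte count a single DMA packet can move
 * (hence the num_loops split); bits 30/31 on the size word appear to be
 * control flags. The names here are assumptions from context. */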
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

#endif

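/* Note: the gb_tile_config computed here mirrors the one in r300_gpu_init()
 * below; if the pipe-count encoding changes, keep the two in sync. The raw
 * 0x170C register poked in both places with bit 31 is assumed to be the DST
 * pipe config register with its auto-config bit (R300_DST_PIPE_CONFIG /
 * R300_PIPE_AUTO_CONFIG in later headers). */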
void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

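/* MC_STATUS poll: 0x0150 is read as MC_STATUS per the inline comment; bit 4
 * is taken to be the MC idle flag (an assumption, named like MC_IDLE in
 * later headers). */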
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 4)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: does rv380 have one pipe? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

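/* RBBM_STATUS bit meanings used in the two reset routines below are
 * assumptions drawn from the surrounding messages and later headers:
 * bit 16 = CP busy, bits 20/26 = VAP/GA busy (per the error strings),
 * bits 17/18/27 = 2D engine busy flags, bit 31 = GUI active. */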
int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(0x43E0, 0);
			WREG32(0x43E4, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* the reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;

	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0:  rdev->mc.vram_width = 64; break;
	case 1:  rdev->mc.vram_width = 128; break;
	case 2:  rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}

	r100_vram_init_sizes(rdev);
}

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));
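	/* While the link width is being renegotiated, reads through the PCIE
	 * index port can return all ones (an assumption, inferred from the
	 * 0xffffffff test below), so spin until a sane value comes back. */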

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}


#if 0

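/* Dead (#if 0) copy of the r300 command-stream checker: it validates
 * PACKET0 register writes coming from userspace, patches buffer addresses
 * through relocations, and records state (pitch, cpp, formats, scissor)
 * so that draws can be bounds-checked against the backing buffers. */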
629 | static int r300_packet0_check(struct radeon_cs_parser *p, |
629 | static int r300_packet0_check(struct radeon_cs_parser *p, |
630 | struct radeon_cs_packet *pkt, |
630 | struct radeon_cs_packet *pkt, |
631 | unsigned idx, unsigned reg) |
631 | unsigned idx, unsigned reg) |
632 | { |
632 | { |
633 | struct radeon_cs_reloc *reloc; |
633 | struct radeon_cs_reloc *reloc; |
634 | struct r100_cs_track *track; |
634 | struct r100_cs_track *track; |
635 | volatile uint32_t *ib; |
635 | volatile uint32_t *ib; |
636 | uint32_t tmp, tile_flags = 0; |
636 | uint32_t tmp, tile_flags = 0; |
637 | unsigned i; |
637 | unsigned i; |
638 | int r; |
638 | int r; |
639 | u32 idx_value; |
639 | u32 idx_value; |
640 | 640 | ||
641 | ib = p->ib->ptr; |
641 | ib = p->ib->ptr; |
642 | track = (struct r100_cs_track *)p->track; |
642 | track = (struct r100_cs_track *)p->track; |
643 | idx_value = radeon_get_ib_value(p, idx); |
643 | idx_value = radeon_get_ib_value(p, idx); |
644 | 644 | ||
645 | switch(reg) { |
645 | switch(reg) { |
646 | case AVIVO_D1MODE_VLINE_START_END: |
646 | case AVIVO_D1MODE_VLINE_START_END: |
647 | case RADEON_CRTC_GUI_TRIG_VLINE: |
647 | case RADEON_CRTC_GUI_TRIG_VLINE: |
648 | r = r100_cs_packet_parse_vline(p); |
648 | r = r100_cs_packet_parse_vline(p); |
649 | if (r) { |
649 | if (r) { |
650 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
650 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
651 | idx, reg); |
651 | idx, reg); |
652 | r100_cs_dump_packet(p, pkt); |
652 | r100_cs_dump_packet(p, pkt); |
653 | return r; |
653 | return r; |
654 | } |
654 | } |
655 | break; |
655 | break; |
656 | case RADEON_DST_PITCH_OFFSET: |
656 | case RADEON_DST_PITCH_OFFSET: |
657 | case RADEON_SRC_PITCH_OFFSET: |
657 | case RADEON_SRC_PITCH_OFFSET: |
658 | r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
658 | r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
659 | if (r) |
659 | if (r) |
660 | return r; |
660 | return r; |
661 | break; |
661 | break; |
662 | case R300_RB3D_COLOROFFSET0: |
662 | case R300_RB3D_COLOROFFSET0: |
663 | case R300_RB3D_COLOROFFSET1: |
663 | case R300_RB3D_COLOROFFSET1: |
664 | case R300_RB3D_COLOROFFSET2: |
664 | case R300_RB3D_COLOROFFSET2: |
665 | case R300_RB3D_COLOROFFSET3: |
665 | case R300_RB3D_COLOROFFSET3: |
666 | i = (reg - R300_RB3D_COLOROFFSET0) >> 2; |
666 | i = (reg - R300_RB3D_COLOROFFSET0) >> 2; |
667 | r = r100_cs_packet_next_reloc(p, &reloc); |
667 | r = r100_cs_packet_next_reloc(p, &reloc); |
668 | if (r) { |
668 | if (r) { |
669 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
669 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
670 | idx, reg); |
670 | idx, reg); |
671 | r100_cs_dump_packet(p, pkt); |
671 | r100_cs_dump_packet(p, pkt); |
672 | return r; |
672 | return r; |
673 | } |
673 | } |
674 | track->cb[i].robj = reloc->robj; |
674 | track->cb[i].robj = reloc->robj; |
675 | track->cb[i].offset = idx_value; |
675 | track->cb[i].offset = idx_value; |
676 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
676 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
677 | break; |
677 | break; |
678 | case R300_ZB_DEPTHOFFSET: |
678 | case R300_ZB_DEPTHOFFSET: |
679 | r = r100_cs_packet_next_reloc(p, &reloc); |
679 | r = r100_cs_packet_next_reloc(p, &reloc); |
680 | if (r) { |
680 | if (r) { |
681 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
681 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
682 | idx, reg); |
682 | idx, reg); |
683 | r100_cs_dump_packet(p, pkt); |
683 | r100_cs_dump_packet(p, pkt); |
684 | return r; |
684 | return r; |
685 | } |
685 | } |
686 | track->zb.robj = reloc->robj; |
686 | track->zb.robj = reloc->robj; |
687 | track->zb.offset = idx_value; |
687 | track->zb.offset = idx_value; |
688 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
688 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
689 | break; |
689 | break; |
690 | case R300_TX_OFFSET_0: |
690 | case R300_TX_OFFSET_0: |
691 | case R300_TX_OFFSET_0+4: |
691 | case R300_TX_OFFSET_0+4: |
692 | case R300_TX_OFFSET_0+8: |
692 | case R300_TX_OFFSET_0+8: |
693 | case R300_TX_OFFSET_0+12: |
693 | case R300_TX_OFFSET_0+12: |
694 | case R300_TX_OFFSET_0+16: |
694 | case R300_TX_OFFSET_0+16: |
695 | case R300_TX_OFFSET_0+20: |
695 | case R300_TX_OFFSET_0+20: |
696 | case R300_TX_OFFSET_0+24: |
696 | case R300_TX_OFFSET_0+24: |
697 | case R300_TX_OFFSET_0+28: |
697 | case R300_TX_OFFSET_0+28: |
698 | case R300_TX_OFFSET_0+32: |
698 | case R300_TX_OFFSET_0+32: |
699 | case R300_TX_OFFSET_0+36: |
699 | case R300_TX_OFFSET_0+36: |
700 | case R300_TX_OFFSET_0+40: |
700 | case R300_TX_OFFSET_0+40: |
701 | case R300_TX_OFFSET_0+44: |
701 | case R300_TX_OFFSET_0+44: |
702 | case R300_TX_OFFSET_0+48: |
702 | case R300_TX_OFFSET_0+48: |
703 | case R300_TX_OFFSET_0+52: |
703 | case R300_TX_OFFSET_0+52: |
704 | case R300_TX_OFFSET_0+56: |
704 | case R300_TX_OFFSET_0+56: |
705 | case R300_TX_OFFSET_0+60: |
705 | case R300_TX_OFFSET_0+60: |
706 | i = (reg - R300_TX_OFFSET_0) >> 2; |
706 | i = (reg - R300_TX_OFFSET_0) >> 2; |
707 | r = r100_cs_packet_next_reloc(p, &reloc); |
707 | r = r100_cs_packet_next_reloc(p, &reloc); |
708 | if (r) { |
708 | if (r) { |
709 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
709 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
710 | idx, reg); |
710 | idx, reg); |
711 | r100_cs_dump_packet(p, pkt); |
711 | r100_cs_dump_packet(p, pkt); |
712 | return r; |
712 | return r; |
713 | } |
713 | } |
714 | 714 | ||
715 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
715 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
716 | tile_flags |= R300_TXO_MACRO_TILE; |
716 | tile_flags |= R300_TXO_MACRO_TILE; |
717 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
717 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
718 | tile_flags |= R300_TXO_MICRO_TILE; |
718 | tile_flags |= R300_TXO_MICRO_TILE; |
719 | 719 | ||
720 | tmp = idx_value + ((u32)reloc->lobj.gpu_offset); |
720 | tmp = idx_value + ((u32)reloc->lobj.gpu_offset); |
721 | tmp |= tile_flags; |
721 | tmp |= tile_flags; |
722 | ib[idx] = tmp; |
722 | ib[idx] = tmp; |
723 | track->textures[i].robj = reloc->robj; |
723 | track->textures[i].robj = reloc->robj; |
724 | break; |
724 | break; |
725 | /* Tracked registers */ |
725 | /* Tracked registers */ |
726 | case 0x2084: |
726 | case 0x2084: |
727 | /* VAP_VF_CNTL */ |
727 | /* VAP_VF_CNTL */ |
728 | track->vap_vf_cntl = idx_value; |
728 | track->vap_vf_cntl = idx_value; |
729 | break; |
729 | break; |
730 | case 0x20B4: |
730 | case 0x20B4: |
731 | /* VAP_VTX_SIZE */ |
731 | /* VAP_VTX_SIZE */ |
732 | track->vtx_size = idx_value & 0x7F; |
732 | track->vtx_size = idx_value & 0x7F; |
733 | break; |
733 | break; |
734 | case 0x2134: |
734 | case 0x2134: |
735 | /* VAP_VF_MAX_VTX_INDX */ |
735 | /* VAP_VF_MAX_VTX_INDX */ |
736 | track->max_indx = idx_value & 0x00FFFFFFUL; |
736 | track->max_indx = idx_value & 0x00FFFFFFUL; |
737 | break; |
737 | break; |
738 | case 0x43E4: |
738 | case 0x43E4: |
739 | /* SC_SCISSOR1 */ |
739 | /* SC_SCISSOR1 */ |
740 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
740 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
741 | if (p->rdev->family < CHIP_RV515) { |
741 | if (p->rdev->family < CHIP_RV515) { |
742 | track->maxy -= 1440; |
742 | track->maxy -= 1440; |
743 | } |
743 | } |
744 | break; |
744 | break; |
745 | case 0x4E00: |
745 | case 0x4E00: |
746 | /* RB3D_CCTL */ |
746 | /* RB3D_CCTL */ |
747 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
747 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
748 | break; |
748 | break; |
749 | case 0x4E38: |
749 | case 0x4E38: |
750 | case 0x4E3C: |
750 | case 0x4E3C: |
751 | case 0x4E40: |
751 | case 0x4E40: |
752 | case 0x4E44: |
752 | case 0x4E44: |
753 | /* RB3D_COLORPITCH0 */ |
753 | /* RB3D_COLORPITCH0 */ |
754 | /* RB3D_COLORPITCH1 */ |
754 | /* RB3D_COLORPITCH1 */ |
755 | /* RB3D_COLORPITCH2 */ |
755 | /* RB3D_COLORPITCH2 */ |
756 | /* RB3D_COLORPITCH3 */ |
756 | /* RB3D_COLORPITCH3 */ |
757 | r = r100_cs_packet_next_reloc(p, &reloc); |
757 | r = r100_cs_packet_next_reloc(p, &reloc); |
758 | if (r) { |
758 | if (r) { |
759 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
759 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
760 | idx, reg); |
760 | idx, reg); |
761 | r100_cs_dump_packet(p, pkt); |
761 | r100_cs_dump_packet(p, pkt); |
762 | return r; |
762 | return r; |
763 | } |
763 | } |
764 | 764 | ||
765 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
765 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
766 | tile_flags |= R300_COLOR_TILE_ENABLE; |
766 | tile_flags |= R300_COLOR_TILE_ENABLE; |
767 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
767 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
768 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
768 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
769 | 769 | ||
770 | tmp = idx_value & ~(0x7 << 16); |
770 | tmp = idx_value & ~(0x7 << 16); |
771 | tmp |= tile_flags; |
771 | tmp |= tile_flags; |
772 | ib[idx] = tmp; |
772 | ib[idx] = tmp; |
773 | 773 | ||
774 | i = (reg - 0x4E38) >> 2; |
774 | i = (reg - 0x4E38) >> 2; |
775 | track->cb[i].pitch = idx_value & 0x3FFE; |
775 | track->cb[i].pitch = idx_value & 0x3FFE; |
776 | switch (((idx_value >> 21) & 0xF)) { |
776 | switch (((idx_value >> 21) & 0xF)) { |
777 | case 9: |
777 | case 9: |
778 | case 11: |
778 | case 11: |
779 | case 12: |
779 | case 12: |
780 | track->cb[i].cpp = 1; |
780 | track->cb[i].cpp = 1; |
781 | break; |
781 | break; |
782 | case 3: |
782 | case 3: |
783 | case 4: |
783 | case 4: |
784 | case 13: |
784 | case 13: |
785 | case 15: |
785 | case 15: |
786 | track->cb[i].cpp = 2; |
786 | track->cb[i].cpp = 2; |
787 | break; |
787 | break; |
788 | case 6: |
788 | case 6: |
789 | track->cb[i].cpp = 4; |
789 | track->cb[i].cpp = 4; |
790 | break; |
790 | break; |
791 | case 10: |
791 | case 10: |
792 | track->cb[i].cpp = 8; |
792 | track->cb[i].cpp = 8; |
793 | break; |
793 | break; |
794 | case 7: |
794 | case 7: |
795 | track->cb[i].cpp = 16; |
795 | track->cb[i].cpp = 16; |
796 | break; |
796 | break; |
797 | default: |
797 | default: |
798 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
798 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
799 | ((idx_value >> 21) & 0xF)); |
799 | ((idx_value >> 21) & 0xF)); |
800 | return -EINVAL; |
800 | return -EINVAL; |
801 | } |
801 | } |
802 | break; |
802 | break; |
803 | case 0x4F00: |
803 | case 0x4F00: |
804 | /* ZB_CNTL */ |
804 | /* ZB_CNTL */ |
805 | if (idx_value & 2) { |
805 | if (idx_value & 2) { |
806 | track->z_enabled = true; |
806 | track->z_enabled = true; |
807 | } else { |
807 | } else { |
808 | track->z_enabled = false; |
808 | track->z_enabled = false; |
809 | } |
809 | } |
810 | break; |
810 | break; |
811 | case 0x4F10: |
811 | case 0x4F10: |
812 | /* ZB_FORMAT */ |
812 | /* ZB_FORMAT */ |
813 | switch ((idx_value & 0xF)) { |
813 | switch ((idx_value & 0xF)) { |
814 | case 0: |
814 | case 0: |
815 | case 1: |
815 | case 1: |
816 | track->zb.cpp = 2; |
816 | track->zb.cpp = 2; |
817 | break; |
817 | break; |
818 | case 2: |
818 | case 2: |
819 | track->zb.cpp = 4; |
819 | track->zb.cpp = 4; |
820 | break; |
820 | break; |
821 | default: |
821 | default: |
822 | DRM_ERROR("Invalid z buffer format (%d) !\n", |
822 | DRM_ERROR("Invalid z buffer format (%d) !\n", |
823 | (idx_value & 0xF)); |
823 | (idx_value & 0xF)); |
824 | return -EINVAL; |
824 | return -EINVAL; |
825 | } |
825 | } |
826 | break; |
826 | break; |
    case 0x4F24:
        /* ZB_DEPTHPITCH */
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }

        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
            tile_flags |= R300_DEPTHMACROTILE_ENABLE;
        if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
            tile_flags |= R300_DEPTHMICROTILE_TILED;

        tmp = idx_value & ~(0x7 << 16);
        tmp |= tile_flags;
        ib[idx] = tmp;

        track->zb.pitch = idx_value & 0x3FFC;
        break;
    case 0x4104:
        /* TX_ENABLE: one enable bit per texture unit */
        for (i = 0; i < 16; i++) {
            bool enabled;

            enabled = !!(idx_value & (1 << i));
            track->textures[i].enabled = enabled;
        }
        break;
    case 0x44C0:
    case 0x44C4:
    case 0x44C8:
    case 0x44CC:
    case 0x44D0:
    case 0x44D4:
    case 0x44D8:
    case 0x44DC:
    case 0x44E0:
    case 0x44E4:
    case 0x44E8:
    case 0x44EC:
    case 0x44F0:
    case 0x44F4:
    case 0x44F8:
    case 0x44FC:
        /* TX_FORMAT1_[0-15] */
        i = (reg - 0x44C0) >> 2;
        tmp = (idx_value >> 25) & 0x3;
        track->textures[i].tex_coord_type = tmp;
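        /* The low five bits select the texel format; record bytes per
         * texel (cpp) for the tracker's size checks. Compressed formats
         * (DXT1/3/5, ATI2N) also record a compression type, since their
         * footprint is computed per 4x4 block rather than per texel. */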
        switch ((idx_value & 0x1F)) {
        case R300_TX_FORMAT_X8:
        case R300_TX_FORMAT_Y4X4:
        case R300_TX_FORMAT_Z3Y3X2:
            track->textures[i].cpp = 1;
            break;
        case R300_TX_FORMAT_X16:
        case R300_TX_FORMAT_Y8X8:
        case R300_TX_FORMAT_Z5Y6X5:
        case R300_TX_FORMAT_Z6Y5X5:
        case R300_TX_FORMAT_W4Z4Y4X4:
        case R300_TX_FORMAT_W1Z5Y5X5:
        case R300_TX_FORMAT_D3DMFT_CxV8U8:
        case R300_TX_FORMAT_B8G8_B8G8:
        case R300_TX_FORMAT_G8R8_G8B8:
            track->textures[i].cpp = 2;
            break;
        case R300_TX_FORMAT_Y16X16:
        case R300_TX_FORMAT_Z11Y11X10:
        case R300_TX_FORMAT_Z10Y11X11:
        case R300_TX_FORMAT_W8Z8Y8X8:
        case R300_TX_FORMAT_W2Z10Y10X10:
        case 0x17:
        case R300_TX_FORMAT_FL_I32:
        case 0x1e:
            track->textures[i].cpp = 4;
            break;
        case R300_TX_FORMAT_W16Z16Y16X16:
        case R300_TX_FORMAT_FL_R16G16B16A16:
        case R300_TX_FORMAT_FL_I32A32:
            track->textures[i].cpp = 8;
            break;
        case R300_TX_FORMAT_FL_R32G32B32A32:
            track->textures[i].cpp = 16;
            break;
        case R300_TX_FORMAT_DXT1:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
            break;
        case R300_TX_FORMAT_ATI2N:
            if (p->rdev->family < CHIP_R420) {
                DRM_ERROR("Invalid texture format %u\n",
                          (idx_value & 0x1F));
                return -EINVAL;
            }
            /* The same rules apply as for DXT3/5. */
            /* Pass through. */
        case R300_TX_FORMAT_DXT3:
        case R300_TX_FORMAT_DXT5:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
            break;
        default:
            DRM_ERROR("Invalid texture format %u\n",
                      (idx_value & 0x1F));
            return -EINVAL;
        }
        break;
    case 0x4400:
    case 0x4404:
    case 0x4408:
    case 0x440C:
    case 0x4410:
    case 0x4414:
    case 0x4418:
    case 0x441C:
    case 0x4420:
    case 0x4424:
    case 0x4428:
    case 0x442C:
    case 0x4430:
    case 0x4434:
    case 0x4438:
    case 0x443C:
        /* TX_FILTER0_[0-15] */
        i = (reg - 0x4400) >> 2;
        tmp = idx_value & 0x7;
        if (tmp == 2 || tmp == 4 || tmp == 6) {
            track->textures[i].roundup_w = false;
        }
        tmp = (idx_value >> 3) & 0x7;
        if (tmp == 2 || tmp == 4 || tmp == 6) {
            track->textures[i].roundup_h = false;
        }
        break;
    case 0x4500:
    case 0x4504:
    case 0x4508:
    case 0x450C:
    case 0x4510:
    case 0x4514:
    case 0x4518:
    case 0x451C:
    case 0x4520:
    case 0x4524:
    case 0x4528:
    case 0x452C:
    case 0x4530:
    case 0x4534:
    case 0x4538:
    case 0x453C:
        /* TX_FORMAT2_[0-15] */
        i = (reg - 0x4500) >> 2;
        tmp = idx_value & 0x3FFF;
        track->textures[i].pitch = tmp + 1;
        if (p->rdev->family >= CHIP_RV515) {
            tmp = ((idx_value >> 15) & 1) << 11;
            track->textures[i].width_11 = tmp;
            tmp = ((idx_value >> 16) & 1) << 11;
            track->textures[i].height_11 = tmp;

            /* ATI1N */
            if (idx_value & (1 << 14)) {
                /* The same rules apply as for DXT1. */
                track->textures[i].compress_format =
                    R100_TRACK_COMP_DXT1;
            }
        } else if (idx_value & (1 << 14)) {
            DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
            return -EINVAL;
        }
        break;
    case 0x4480:
    case 0x4484:
    case 0x4488:
    case 0x448C:
    case 0x4490:
    case 0x4494:
    case 0x4498:
    case 0x449C:
    case 0x44A0:
    case 0x44A4:
    case 0x44A8:
    case 0x44AC:
    case 0x44B0:
    case 0x44B4:
    case 0x44B8:
    case 0x44BC:
        /* TX_FORMAT0_[0-15] */
        i = (reg - 0x4480) >> 2;
        tmp = idx_value & 0x7FF;
        track->textures[i].width = tmp + 1;
        tmp = (idx_value >> 11) & 0x7FF;
        track->textures[i].height = tmp + 1;
        tmp = (idx_value >> 26) & 0xF;
        track->textures[i].num_levels = tmp;
        tmp = idx_value & (1 << 31);
        track->textures[i].use_pitch = !!tmp;
        tmp = (idx_value >> 22) & 0xF;
        track->textures[i].txdepth = tmp;
        break;
    case R300_ZB_ZPASS_ADDR:
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case 0x4e0c:
        /* RB3D_COLOR_CHANNEL_MASK */
        track->color_channel_mask = idx_value;
        break;
    case 0x4d1c:
        /* ZB_BW_CNTL */
        track->fastfill = !!(idx_value & (1 << 2));
        break;
    case 0x4e04:
        /* RB3D_BLENDCNTL */
        track->blend_read_enable = !!(idx_value & (1 << 2));
        break;
    case 0x4be8:
        /* valid register only on RV530 */
        if (p->rdev->family == CHIP_RV530)
            break;
        /* fallthrough do not move */
    default:
        printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
               reg, idx);
        return -EINVAL;
    }
    return 0;
}

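/* Validate a single type-3 (PACKET3) command. Relocations are patched
 * straight into the indirect buffer, and every draw packet runs
 * r100_cs_track_check() so the accumulated state (color/z buffers,
 * textures, vertex arrays) is verified before the stream reaches the GPU. */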
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;
    unsigned idx;
    int r;

    ib = p->ib->ptr;
    idx = pkt->idx + 1;
    track = (struct r100_cs_track *)p->track;
    switch (pkt->opcode) {
    case PACKET3_3D_LOAD_VBPNTR:
        r = r100_packet3_load_vbpntr(p, pkt, idx);
        if (r)
            return r;
        break;
    case PACKET3_INDX_BUFFER:
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
        r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
        if (r) {
            return r;
        }
        break;
    /* Draw packets */
    case PACKET3_3D_DRAW_IMMD:
        /* Number of dwords is vtx_size * (num_vertices - 1);
         * PRIM_WALK must be equal to 3, i.e. the vertex data is
         * embedded in the command stream */
        if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
            return -EINVAL;
        }
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        track->immd_dwords = pkt->count - 1;
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_IMMD_2:
        /* Number of dwords is vtx_size * (num_vertices - 1);
         * PRIM_WALK must be equal to 3, i.e. the vertex data is
         * embedded in the command stream */
        if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
            return -EINVAL;
        }
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        track->immd_dwords = pkt->count;
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_VBUF:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_VBUF_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_INDX:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_3D_DRAW_INDX_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
        if (r) {
            return r;
        }
        break;
    case PACKET3_NOP:
        break;
    default:
        DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
        return -EINVAL;
    }
    return 0;
}

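/* Top-level command stream parser for r300-class chips: split the IB
 * chunk into packets, let the reg_safe bitmap filter type-0 register
 * writes (r300_packet0_check() handles the registers that need
 * relocation or state tracking) and route type-3 packets through
 * r300_packet3_check(). */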
int r300_cs_parse(struct radeon_cs_parser *p)
{
    struct radeon_cs_packet pkt;
    struct r100_cs_track *track;
    int r;

    track = kzalloc(sizeof(*track), GFP_KERNEL);
    if (track == NULL)
        return -ENOMEM;
    r100_cs_track_clear(p->rdev, track);
    p->track = track;
    do {
        r = r100_cs_packet_parse(p, &pkt, p->idx);
        if (r) {
            return r;
        }
        p->idx += pkt.count + 2;
        switch (pkt.type) {
        case PACKET_TYPE0:
            r = r100_cs_parse_packet0(p, &pkt,
                                      p->rdev->config.r300.reg_safe_bm,
                                      p->rdev->config.r300.reg_safe_bm_size,
                                      &r300_packet0_check);
            break;
        case PACKET_TYPE2:
            break;
        case PACKET_TYPE3:
            r = r300_packet3_check(p, &pkt);
            break;
        default:
            DRM_ERROR("Unknown packet type %d !\n", pkt.type);
            return -EINVAL;
        }
        if (r) {
            return r;
        }
    } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
    return 0;
}
#endif

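/* Install the register-safe bitmap (generated into r300_reg_safe.h) that
 * the CS parser consults when filtering type-0 register writes. */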
void r300_set_reg_safe(struct radeon_device *rdev)
{
    rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
    rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

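/* Program the memory controller: stop all MC clients, set up (or close)
 * the AGP aperture, wait for the MC to go idle, then program the VRAM
 * location and let the clients resume. */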
void r300_mc_program(struct radeon_device *rdev)
{
    struct r100_mc_save save;
    int r;

    r = r100_debugfs_mc_info_init(rdev);
    if (r) {
        dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
    }

    /* Stop all mc clients */
    r100_mc_stop(rdev, &save);
    if (rdev->flags & RADEON_IS_AGP) {
        WREG32(R_00014C_MC_AGP_LOCATION,
               S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
               S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
        WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
        WREG32(R_00015C_AGP_BASE_2,
               upper_32_bits(rdev->mc.agp_base) & 0xff);
    } else {
        WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
        WREG32(R_000170_AGP_BASE, 0);
        WREG32(R_00015C_AGP_BASE_2, 0);
    }
    /* Wait for mc idle */
    if (r300_mc_wait_for_idle(rdev))
        DRM_INFO("Failed to wait for MC idle before programming MC.\n");
    /* Program MC; the address space should be limited to 32 bits */
    WREG32(R_000148_MC_FB_LOCATION,
           S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
           S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
    r100_mc_resume(rdev, &save);
}

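/* Set up clock gating if requested and force on the clocks of blocks
 * that must stay active: CP and VIP always, plus VAP on RV350/RV380. */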
void r300_clock_startup(struct radeon_device *rdev)
{
    u32 tmp;

    if (radeon_dynclks != -1 && radeon_dynclks)
        radeon_legacy_set_clock_gating(rdev, 1);
    /* We need to force on some of the blocks */
    tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
    tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
    if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
        tmp |= S_00000D_FORCE_VAP(1);
    WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

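/* Bring the ASIC up after init or reset: common registers, memory
 * controller, clocks, GPU configuration, GART and bus mastering, and
 * finally a 1MB CP ring. IRQ, writeback and IB setup are stubbed out
 * in this port. */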
static int r300_startup(struct radeon_device *rdev)
{
    int r;

    /* set common regs */
    r100_set_common_regs(rdev);
    /* program mc */
    r300_mc_program(rdev);
    /* Resume clock */
    r300_clock_startup(rdev);
    /* Initialize GPU configuration (# pipes, ...) */
    r300_gpu_init(rdev);
    /* Initialize GART (initialize after TTM so we can allocate
     * memory through TTM but finalize after TTM) */
    if (rdev->flags & RADEON_IS_PCIE) {
        r = rv370_pcie_gart_enable(rdev);
        if (r)
            return r;
    }

    if (rdev->family == CHIP_R300 ||
        rdev->family == CHIP_R350 ||
        rdev->family == CHIP_RV350)
        r100_enable_bm(rdev);

    if (rdev->flags & RADEON_IS_PCI) {
        r = r100_pci_gart_enable(rdev);
        if (r)
            return r;
    }
    /* Enable IRQ */
//  r100_irq_set(rdev);
    rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
    /* 1M ring buffer */
    r = r100_cp_init(rdev, 1024 * 1024);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
        return r;
    }
//  r = r100_wb_init(rdev);
//  if (r)
//      dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
//  r = r100_ib_init(rdev);
//  if (r) {
//      dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
//      return r;
//  }
    return 0;
}

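/* One-time initialization for r300-class cards: fetch the video BIOS
 * (combios expected), reset and POST-check the GPU, gather clock and
 * VRAM information, initialize the memory controller, buffer manager
 * and GART, then try to start acceleration; if that fails, tear the
 * GART back down and continue with acceleration disabled. */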
int r300_init(struct radeon_device *rdev)
{
    int r;

    /* Disable VGA */
    r100_vga_render_disable(rdev);
    /* Initialize scratch registers */
    radeon_scratch_init(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);
    /* TODO: disabling VGA needs to use a VGA request */
    /* BIOS */
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    if (rdev->is_atom_bios) {
        dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
        return -EINVAL;
    } else {
        r = radeon_combios_init(rdev);
        if (r)
            return r;
    }
    /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
    if (radeon_gpu_reset(rdev)) {
        dev_warn(rdev->dev,
                 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                 RREG32(R_000E40_RBBM_STATUS),
                 RREG32(R_0007C0_CP_STAT));
    }
    /* check if the card is posted */
    if (radeon_boot_test_post_card(rdev) == false)
        return -EINVAL;
    /* Set asic errata */
    r300_errata(rdev);
    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);
    /* Initialize power management */
    radeon_pm_init(rdev);
    /* Get vram information */
    r300_vram_info(rdev);
    /* Initialize memory controller (also test AGP) */
    r = r420_mc_init(rdev);
    dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
    if (r)
        return r;
    /* Fence driver */
//  r = radeon_fence_driver_init(rdev);
//  if (r)
//      return r;
//  r = radeon_irq_kms_init(rdev);
//  if (r)
//      return r;
    /* Memory manager */
    r = radeon_bo_init(rdev);
    if (r)
        return r;
    if (rdev->flags & RADEON_IS_PCIE) {
        r = rv370_pcie_gart_init(rdev);
        if (r)
            return r;
    }
    if (rdev->flags & RADEON_IS_PCI) {
        r = r100_pci_gart_init(rdev);
        if (r)
            return r;
    }
    r300_set_reg_safe(rdev);
    rdev->accel_working = true;
    r = r300_startup(rdev);
    if (r) {
        /* Something went wrong with the accel init, so stop accel */
        dev_err(rdev->dev, "Disabling GPU acceleration\n");
//      r100_cp_fini(rdev);
//      r100_wb_fini(rdev);
//      r100_ib_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
            rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
            r100_pci_gart_fini(rdev);
//      radeon_agp_fini(rdev);
        rdev->accel_working = false;
    }
    return 0;
}