Rev 1430 | Rev 2005 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1430 | Rev 1963 | ||
---|---|---|---|
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* NOTE(review): the bare "#include" lines in the diff dump lost their
 * angle-bracket targets to the HTML viewer; seq_file.h and slab.h are what
 * upstream r420.c includes at this point — confirm against the repository.
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"
36 | 38 | ||
37 | static void r420_set_reg_safe(struct radeon_device *rdev) |
39 | static void r420_set_reg_safe(struct radeon_device *rdev) |
38 | { |
40 | { |
39 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; |
41 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; |
40 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); |
42 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); |
41 | } |
43 | } |
42 | 44 | ||
43 | void r420_pipes_init(struct radeon_device *rdev) |
45 | void r420_pipes_init(struct radeon_device *rdev) |
44 | { |
46 | { |
45 | unsigned tmp; |
47 | unsigned tmp; |
46 | unsigned gb_pipe_select; |
48 | unsigned gb_pipe_select; |
47 | unsigned num_pipes; |
49 | unsigned num_pipes; |
48 | 50 | ||
49 | /* GA_ENHANCE workaround TCL deadlock issue */ |
51 | /* GA_ENHANCE workaround TCL deadlock issue */ |
50 | WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL | |
52 | WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL | |
51 | (1 << 2) | (1 << 3)); |
53 | (1 << 2) | (1 << 3)); |
52 | /* add idle wait as per freedesktop.org bug 24041 */ |
54 | /* add idle wait as per freedesktop.org bug 24041 */ |
53 | if (r100_gui_wait_for_idle(rdev)) { |
55 | if (r100_gui_wait_for_idle(rdev)) { |
54 | printk(KERN_WARNING "Failed to wait GUI idle while " |
56 | printk(KERN_WARNING "Failed to wait GUI idle while " |
55 | "programming pipes. Bad things might happen.\n"); |
57 | "programming pipes. Bad things might happen.\n"); |
56 | } |
58 | } |
57 | /* get max number of pipes */ |
59 | /* get max number of pipes */ |
58 | gb_pipe_select = RREG32(0x402C); |
60 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
59 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
- | 62 | ||
- | 63 | /* SE chips have 1 pipe */ |
|
- | 64 | if ((rdev->pdev->device == 0x5e4c) || |
|
- | 65 | (rdev->pdev->device == 0x5e4f)) |
|
- | 66 | num_pipes = 1; |
|
- | 67 | ||
60 | rdev->num_gb_pipes = num_pipes; |
68 | rdev->num_gb_pipes = num_pipes; |
61 | tmp = 0; |
69 | tmp = 0; |
62 | switch (num_pipes) { |
70 | switch (num_pipes) { |
63 | default: |
71 | default: |
64 | /* force to 1 pipe */ |
72 | /* force to 1 pipe */ |
65 | num_pipes = 1; |
73 | num_pipes = 1; |
66 | case 1: |
74 | case 1: |
67 | tmp = (0 << 1); |
75 | tmp = (0 << 1); |
68 | break; |
76 | break; |
69 | case 2: |
77 | case 2: |
70 | tmp = (3 << 1); |
78 | tmp = (3 << 1); |
71 | break; |
79 | break; |
72 | case 3: |
80 | case 3: |
73 | tmp = (6 << 1); |
81 | tmp = (6 << 1); |
74 | break; |
82 | break; |
75 | case 4: |
83 | case 4: |
76 | tmp = (7 << 1); |
84 | tmp = (7 << 1); |
77 | break; |
85 | break; |
78 | } |
86 | } |
79 | WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1); |
87 | WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1); |
80 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ |
88 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ |
81 | tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING; |
89 | tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING; |
82 | WREG32(R300_GB_TILE_CONFIG, tmp); |
90 | WREG32(R300_GB_TILE_CONFIG, tmp); |
83 | if (r100_gui_wait_for_idle(rdev)) { |
91 | if (r100_gui_wait_for_idle(rdev)) { |
84 | printk(KERN_WARNING "Failed to wait GUI idle while " |
92 | printk(KERN_WARNING "Failed to wait GUI idle while " |
85 | "programming pipes. Bad things might happen.\n"); |
93 | "programming pipes. Bad things might happen.\n"); |
86 | } |
94 | } |
87 | 95 | ||
88 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
96 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
89 | WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); |
97 | WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); |
90 | 98 | ||
91 | WREG32(R300_RB2D_DSTCACHE_MODE, |
99 | WREG32(R300_RB2D_DSTCACHE_MODE, |
92 | RREG32(R300_RB2D_DSTCACHE_MODE) | |
100 | RREG32(R300_RB2D_DSTCACHE_MODE) | |
93 | R300_DC_AUTOFLUSH_ENABLE | |
101 | R300_DC_AUTOFLUSH_ENABLE | |
94 | R300_DC_DC_DISABLE_IGNORE_PE); |
102 | R300_DC_DC_DISABLE_IGNORE_PE); |
95 | 103 | ||
96 | if (r100_gui_wait_for_idle(rdev)) { |
104 | if (r100_gui_wait_for_idle(rdev)) { |
97 | printk(KERN_WARNING "Failed to wait GUI idle while " |
105 | printk(KERN_WARNING "Failed to wait GUI idle while " |
98 | "programming pipes. Bad things might happen.\n"); |
106 | "programming pipes. Bad things might happen.\n"); |
99 | } |
107 | } |
100 | 108 | ||
101 | if (rdev->family == CHIP_RV530) { |
109 | if (rdev->family == CHIP_RV530) { |
102 | tmp = RREG32(RV530_GB_PIPE_SELECT2); |
110 | tmp = RREG32(RV530_GB_PIPE_SELECT2); |
103 | if ((tmp & 3) == 3) |
111 | if ((tmp & 3) == 3) |
104 | rdev->num_z_pipes = 2; |
112 | rdev->num_z_pipes = 2; |
105 | else |
113 | else |
106 | rdev->num_z_pipes = 1; |
114 | rdev->num_z_pipes = 1; |
107 | } else |
115 | } else |
108 | rdev->num_z_pipes = 1; |
116 | rdev->num_z_pipes = 1; |
109 | 117 | ||
110 | DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", |
118 | DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", |
111 | rdev->num_gb_pipes, rdev->num_z_pipes); |
119 | rdev->num_gb_pipes, rdev->num_z_pipes); |
112 | } |
120 | } |
113 | 121 | ||
114 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) |
122 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) |
115 | { |
123 | { |
116 | u32 r; |
124 | u32 r; |
117 | 125 | ||
118 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); |
126 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); |
119 | r = RREG32(R_0001FC_MC_IND_DATA); |
127 | r = RREG32(R_0001FC_MC_IND_DATA); |
120 | return r; |
128 | return r; |
121 | } |
129 | } |
122 | 130 | ||
123 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
131 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
124 | { |
132 | { |
125 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | |
133 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | |
126 | S_0001F8_MC_IND_WR_EN(1)); |
134 | S_0001F8_MC_IND_WR_EN(1)); |
127 | WREG32(R_0001FC_MC_IND_DATA, v); |
135 | WREG32(R_0001FC_MC_IND_DATA, v); |
128 | } |
136 | } |
129 | 137 | ||
130 | static void r420_debugfs(struct radeon_device *rdev) |
138 | static void r420_debugfs(struct radeon_device *rdev) |
131 | { |
139 | { |
132 | if (r100_debugfs_rbbm_init(rdev)) { |
140 | if (r100_debugfs_rbbm_init(rdev)) { |
133 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); |
141 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); |
134 | } |
142 | } |
135 | if (r420_debugfs_pipes_info_init(rdev)) { |
143 | if (r420_debugfs_pipes_info_init(rdev)) { |
136 | DRM_ERROR("Failed to register debugfs file for pipes !\n"); |
144 | DRM_ERROR("Failed to register debugfs file for pipes !\n"); |
137 | } |
145 | } |
138 | } |
146 | } |
139 | 147 | ||
140 | static void r420_clock_resume(struct radeon_device *rdev) |
148 | static void r420_clock_resume(struct radeon_device *rdev) |
141 | { |
149 | { |
142 | u32 sclk_cntl; |
150 | u32 sclk_cntl; |
143 | 151 | ||
144 | if (radeon_dynclks != -1 && radeon_dynclks) |
152 | if (radeon_dynclks != -1 && radeon_dynclks) |
145 | radeon_atom_set_clock_gating(rdev, 1); |
153 | radeon_atom_set_clock_gating(rdev, 1); |
146 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
154 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
147 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
155 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
148 | if (rdev->family == CHIP_R420) |
156 | if (rdev->family == CHIP_R420) |
149 | sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); |
157 | sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); |
150 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); |
158 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); |
151 | } |
159 | } |
152 | 160 | ||
153 | static void r420_cp_errata_init(struct radeon_device *rdev) |
161 | static void r420_cp_errata_init(struct radeon_device *rdev) |
154 | { |
162 | { |
155 | /* RV410 and R420 can lock up if CP DMA to host memory happens |
163 | /* RV410 and R420 can lock up if CP DMA to host memory happens |
156 | * while the 2D engine is busy. |
164 | * while the 2D engine is busy. |
157 | * |
165 | * |
158 | * The proper workaround is to queue a RESYNC at the beginning |
166 | * The proper workaround is to queue a RESYNC at the beginning |
159 | * of the CP init, apparently. |
167 | * of the CP init, apparently. |
160 | */ |
168 | */ |
161 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); |
169 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); |
162 | radeon_ring_lock(rdev, 8); |
170 | radeon_ring_lock(rdev, 8); |
163 | radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1)); |
171 | radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1)); |
164 | radeon_ring_write(rdev, rdev->config.r300.resync_scratch); |
172 | radeon_ring_write(rdev, rdev->config.r300.resync_scratch); |
165 | radeon_ring_write(rdev, 0xDEADBEEF); |
173 | radeon_ring_write(rdev, 0xDEADBEEF); |
166 | radeon_ring_unlock_commit(rdev); |
174 | radeon_ring_unlock_commit(rdev); |
167 | } |
175 | } |
168 | 176 | ||
169 | static void r420_cp_errata_fini(struct radeon_device *rdev) |
177 | static void r420_cp_errata_fini(struct radeon_device *rdev) |
170 | { |
178 | { |
171 | /* Catch the RESYNC we dispatched all the way back, |
179 | /* Catch the RESYNC we dispatched all the way back, |
172 | * at the very beginning of the CP init. |
180 | * at the very beginning of the CP init. |
173 | */ |
181 | */ |
174 | radeon_ring_lock(rdev, 8); |
182 | radeon_ring_lock(rdev, 8); |
175 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
183 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
176 | radeon_ring_write(rdev, R300_RB3D_DC_FINISH); |
184 | radeon_ring_write(rdev, R300_RB3D_DC_FINISH); |
177 | radeon_ring_unlock_commit(rdev); |
185 | radeon_ring_unlock_commit(rdev); |
178 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); |
186 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); |
179 | } |
187 | } |
180 | 188 | ||
181 | static int r420_startup(struct radeon_device *rdev) |
189 | static int r420_startup(struct radeon_device *rdev) |
182 | { |
190 | { |
183 | int r; |
191 | int r; |
184 | 192 | ||
185 | /* set common regs */ |
193 | /* set common regs */ |
186 | r100_set_common_regs(rdev); |
194 | r100_set_common_regs(rdev); |
187 | /* program mc */ |
195 | /* program mc */ |
188 | r300_mc_program(rdev); |
196 | r300_mc_program(rdev); |
189 | /* Resume clock */ |
197 | /* Resume clock */ |
190 | r420_clock_resume(rdev); |
198 | r420_clock_resume(rdev); |
191 | /* Initialize GART (initialize after TTM so we can allocate |
199 | /* Initialize GART (initialize after TTM so we can allocate |
192 | * memory through TTM but finalize after TTM) */ |
200 | * memory through TTM but finalize after TTM) */ |
193 | if (rdev->flags & RADEON_IS_PCIE) { |
201 | if (rdev->flags & RADEON_IS_PCIE) { |
194 | r = rv370_pcie_gart_enable(rdev); |
202 | r = rv370_pcie_gart_enable(rdev); |
195 | if (r) |
203 | if (r) |
196 | return r; |
204 | return r; |
197 | } |
205 | } |
198 | if (rdev->flags & RADEON_IS_PCI) { |
206 | if (rdev->flags & RADEON_IS_PCI) { |
199 | r = r100_pci_gart_enable(rdev); |
207 | r = r100_pci_gart_enable(rdev); |
200 | if (r) |
208 | if (r) |
201 | return r; |
209 | return r; |
202 | } |
210 | } |
203 | r420_pipes_init(rdev); |
211 | r420_pipes_init(rdev); |
204 | /* Enable IRQ */ |
212 | /* Enable IRQ */ |
205 | // r100_irq_set(rdev); |
- | |
206 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
213 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
207 | /* 1M ring buffer */ |
214 | /* 1M ring buffer */ |
208 | r = r100_cp_init(rdev, 1024 * 1024); |
215 | r = r100_cp_init(rdev, 1024 * 1024); |
209 | if (r) { |
216 | if (r) { |
210 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
217 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
211 | return r; |
218 | return r; |
212 | } |
219 | } |
213 | r420_cp_errata_init(rdev); |
220 | r420_cp_errata_init(rdev); |
214 | // r = r100_wb_init(rdev); |
- | |
215 | // if (r) { |
- | |
216 | // dev_err(rdev->dev, "failled initializing WB (%d).\n", r); |
- | |
217 | // } |
- | |
218 | // r = r100_ib_init(rdev); |
- | |
219 | // if (r) { |
- | |
220 | // dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
- | |
221 | // return r; |
- | |
222 | // } |
- | |
223 | return 0; |
221 | return 0; |
224 | } |
222 | } |
225 | 223 | ||
226 | int r420_resume(struct radeon_device *rdev) |
224 | int r420_resume(struct radeon_device *rdev) |
227 | { |
225 | { |
228 | /* Make sur GART are not working */ |
226 | /* Make sur GART are not working */ |
229 | if (rdev->flags & RADEON_IS_PCIE) |
227 | if (rdev->flags & RADEON_IS_PCIE) |
230 | rv370_pcie_gart_disable(rdev); |
228 | rv370_pcie_gart_disable(rdev); |
231 | if (rdev->flags & RADEON_IS_PCI) |
229 | if (rdev->flags & RADEON_IS_PCI) |
232 | r100_pci_gart_disable(rdev); |
230 | r100_pci_gart_disable(rdev); |
233 | /* Resume clock before doing reset */ |
231 | /* Resume clock before doing reset */ |
234 | r420_clock_resume(rdev); |
232 | r420_clock_resume(rdev); |
235 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
233 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
236 | if (radeon_gpu_reset(rdev)) { |
234 | if (radeon_asic_reset(rdev)) { |
237 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
235 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
238 | RREG32(R_000E40_RBBM_STATUS), |
236 | RREG32(R_000E40_RBBM_STATUS), |
239 | RREG32(R_0007C0_CP_STAT)); |
237 | RREG32(R_0007C0_CP_STAT)); |
240 | } |
238 | } |
241 | /* check if cards are posted or not */ |
239 | /* check if cards are posted or not */ |
242 | if (rdev->is_atom_bios) { |
240 | if (rdev->is_atom_bios) { |
243 | atom_asic_init(rdev->mode_info.atom_context); |
241 | atom_asic_init(rdev->mode_info.atom_context); |
244 | } else { |
242 | } else { |
245 | radeon_combios_asic_init(rdev->ddev); |
243 | radeon_combios_asic_init(rdev->ddev); |
246 | } |
244 | } |
247 | /* Resume clock after posting */ |
245 | /* Resume clock after posting */ |
248 | r420_clock_resume(rdev); |
246 | r420_clock_resume(rdev); |
249 | /* Initialize surface registers */ |
247 | /* Initialize surface registers */ |
250 | radeon_surface_init(rdev); |
248 | radeon_surface_init(rdev); |
251 | return r420_startup(rdev); |
249 | return r420_startup(rdev); |
252 | } |
250 | } |
253 | 251 | ||
254 | 252 | ||
255 | 253 | ||
256 | int r420_init(struct radeon_device *rdev) |
254 | int r420_init(struct radeon_device *rdev) |
257 | { |
255 | { |
258 | int r; |
256 | int r; |
259 | 257 | ||
260 | /* Initialize scratch registers */ |
258 | /* Initialize scratch registers */ |
261 | radeon_scratch_init(rdev); |
259 | radeon_scratch_init(rdev); |
262 | /* Initialize surface registers */ |
260 | /* Initialize surface registers */ |
263 | radeon_surface_init(rdev); |
261 | radeon_surface_init(rdev); |
264 | /* TODO: disable VGA need to use VGA request */ |
262 | /* TODO: disable VGA need to use VGA request */ |
- | 263 | /* restore some register to sane defaults */ |
|
- | 264 | r100_restore_sanity(rdev); |
|
265 | /* BIOS*/ |
265 | /* BIOS*/ |
266 | if (!radeon_get_bios(rdev)) { |
266 | if (!radeon_get_bios(rdev)) { |
267 | if (ASIC_IS_AVIVO(rdev)) |
267 | if (ASIC_IS_AVIVO(rdev)) |
268 | return -EINVAL; |
268 | return -EINVAL; |
269 | } |
269 | } |
270 | if (rdev->is_atom_bios) { |
270 | if (rdev->is_atom_bios) { |
271 | r = radeon_atombios_init(rdev); |
271 | r = radeon_atombios_init(rdev); |
272 | if (r) { |
272 | if (r) { |
273 | return r; |
273 | return r; |
274 | } |
274 | } |
275 | } else { |
275 | } else { |
276 | r = radeon_combios_init(rdev); |
276 | r = radeon_combios_init(rdev); |
277 | if (r) { |
277 | if (r) { |
278 | return r; |
278 | return r; |
279 | } |
279 | } |
280 | } |
280 | } |
281 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
281 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
282 | if (radeon_gpu_reset(rdev)) { |
282 | if (radeon_asic_reset(rdev)) { |
283 | dev_warn(rdev->dev, |
283 | dev_warn(rdev->dev, |
284 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
284 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
285 | RREG32(R_000E40_RBBM_STATUS), |
285 | RREG32(R_000E40_RBBM_STATUS), |
286 | RREG32(R_0007C0_CP_STAT)); |
286 | RREG32(R_0007C0_CP_STAT)); |
287 | } |
287 | } |
288 | /* check if cards are posted or not */ |
288 | /* check if cards are posted or not */ |
289 | if (radeon_boot_test_post_card(rdev) == false) |
289 | if (radeon_boot_test_post_card(rdev) == false) |
290 | return -EINVAL; |
290 | return -EINVAL; |
291 | 291 | ||
292 | /* Initialize clocks */ |
292 | /* Initialize clocks */ |
293 | radeon_get_clock_info(rdev->ddev); |
293 | radeon_get_clock_info(rdev->ddev); |
294 | /* Initialize power management */ |
- | |
295 | radeon_pm_init(rdev); |
- | |
296 | /* initialize AGP */ |
294 | /* initialize AGP */ |
297 | if (rdev->flags & RADEON_IS_AGP) { |
295 | if (rdev->flags & RADEON_IS_AGP) { |
298 | r = radeon_agp_init(rdev); |
296 | r = radeon_agp_init(rdev); |
299 | if (r) { |
297 | if (r) { |
300 | radeon_agp_disable(rdev); |
298 | radeon_agp_disable(rdev); |
301 | } |
299 | } |
302 | } |
300 | } |
303 | /* initialize memory controller */ |
301 | /* initialize memory controller */ |
304 | r300_mc_init(rdev); |
302 | r300_mc_init(rdev); |
305 | r420_debugfs(rdev); |
303 | r420_debugfs(rdev); |
306 | /* Fence driver */ |
304 | /* Fence driver */ |
307 | // r = radeon_fence_driver_init(rdev); |
- | |
308 | // if (r) { |
- | |
309 | // return r; |
- | |
310 | // } |
- | |
311 | // r = radeon_irq_kms_init(rdev); |
- | |
312 | // if (r) { |
- | |
313 | // return r; |
- | |
314 | // } |
305 | |
315 | /* Memory manager */ |
306 | /* Memory manager */ |
316 | r = radeon_bo_init(rdev); |
307 | r = radeon_bo_init(rdev); |
317 | if (r) { |
308 | if (r) { |
318 | return r; |
309 | return r; |
319 | } |
310 | } |
320 | if (rdev->family == CHIP_R420) |
311 | if (rdev->family == CHIP_R420) |
321 | r100_enable_bm(rdev); |
312 | r100_enable_bm(rdev); |
322 | 313 | ||
323 | if (rdev->flags & RADEON_IS_PCIE) { |
314 | if (rdev->flags & RADEON_IS_PCIE) { |
324 | r = rv370_pcie_gart_init(rdev); |
315 | r = rv370_pcie_gart_init(rdev); |
325 | if (r) |
316 | if (r) |
326 | return r; |
317 | return r; |
327 | } |
318 | } |
328 | if (rdev->flags & RADEON_IS_PCI) { |
319 | if (rdev->flags & RADEON_IS_PCI) { |
329 | r = r100_pci_gart_init(rdev); |
320 | r = r100_pci_gart_init(rdev); |
330 | if (r) |
321 | if (r) |
331 | return r; |
322 | return r; |
332 | } |
323 | } |
333 | r420_set_reg_safe(rdev); |
324 | r420_set_reg_safe(rdev); |
334 | rdev->accel_working = true; |
325 | rdev->accel_working = true; |
335 | r = r420_startup(rdev); |
326 | r = r420_startup(rdev); |
336 | if (r) { |
327 | if (r) { |
337 | /* Somethings want wront with the accel init stop accel */ |
328 | /* Somethings want wront with the accel init stop accel */ |
338 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
329 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
339 | // r100_cp_fini(rdev); |
- | |
340 | // r100_wb_fini(rdev); |
- | |
341 | // r100_ib_fini(rdev); |
- | |
342 | if (rdev->flags & RADEON_IS_PCIE) |
330 | if (rdev->flags & RADEON_IS_PCIE) |
343 | rv370_pcie_gart_fini(rdev); |
331 | rv370_pcie_gart_fini(rdev); |
344 | if (rdev->flags & RADEON_IS_PCI) |
332 | if (rdev->flags & RADEON_IS_PCI) |
345 | r100_pci_gart_fini(rdev); |
333 | r100_pci_gart_fini(rdev); |
346 | // radeon_agp_fini(rdev); |
- | |
347 | rdev->accel_working = false; |
334 | rdev->accel_working = false; |
348 | } |
335 | } |
349 | return 0; |
336 | return 0; |
350 | } |
337 | } |
351 | 338 | ||
352 | /* |
339 | /* |
353 | * Debugfs info |
340 | * Debugfs info |
354 | */ |
341 | */ |
355 | #if defined(CONFIG_DEBUG_FS) |
342 | #if defined(CONFIG_DEBUG_FS) |
356 | static int r420_debugfs_pipes_info(struct seq_file *m, void *data) |
343 | static int r420_debugfs_pipes_info(struct seq_file *m, void *data) |
357 | { |
344 | { |
358 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
345 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
359 | struct drm_device *dev = node->minor->dev; |
346 | struct drm_device *dev = node->minor->dev; |
360 | struct radeon_device *rdev = dev->dev_private; |
347 | struct radeon_device *rdev = dev->dev_private; |
361 | uint32_t tmp; |
348 | uint32_t tmp; |
362 | 349 | ||
363 | tmp = RREG32(R400_GB_PIPE_SELECT); |
350 | tmp = RREG32(R400_GB_PIPE_SELECT); |
364 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); |
351 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); |
365 | tmp = RREG32(R300_GB_TILE_CONFIG); |
352 | tmp = RREG32(R300_GB_TILE_CONFIG); |
366 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); |
353 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); |
367 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
354 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
368 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); |
355 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); |
369 | return 0; |
356 | return 0; |
370 | } |
357 | } |
371 | 358 | ||
372 | static struct drm_info_list r420_pipes_info_list[] = { |
359 | static struct drm_info_list r420_pipes_info_list[] = { |
373 | {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL}, |
360 | {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL}, |
374 | }; |
361 | }; |
375 | #endif |
362 | #endif |
376 | 363 | ||
377 | int r420_debugfs_pipes_info_init(struct radeon_device *rdev) |
364 | int r420_debugfs_pipes_info_init(struct radeon_device *rdev) |
378 | { |
365 | { |
379 | #if defined(CONFIG_DEBUG_FS) |
366 | #if defined(CONFIG_DEBUG_FS) |
380 | return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1); |
367 | return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1); |
381 | #else |
368 | #else |
382 | return 0; |
369 | return 0; |
383 | #endif |
370 | #endif |
384 | }><>><>><>><>><>><>><> |
371 | }><>><>><>><>><>><>><> |