/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* NOTE(review): the three bracketed header names were stripped by the HTML
 * extraction; reconstructed from the in-tree r420.c of this kernel era —
 * confirm against the original revision. */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"
38 | 38 | ||
39 | static void r420_set_reg_safe(struct radeon_device *rdev) |
39 | static void r420_set_reg_safe(struct radeon_device *rdev) |
40 | { |
40 | { |
41 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; |
41 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; |
42 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); |
42 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); |
43 | } |
43 | } |
44 | 44 | ||
45 | void r420_pipes_init(struct radeon_device *rdev) |
45 | void r420_pipes_init(struct radeon_device *rdev) |
46 | { |
46 | { |
47 | unsigned tmp; |
47 | unsigned tmp; |
48 | unsigned gb_pipe_select; |
48 | unsigned gb_pipe_select; |
49 | unsigned num_pipes; |
49 | unsigned num_pipes; |
50 | 50 | ||
51 | /* GA_ENHANCE workaround TCL deadlock issue */ |
51 | /* GA_ENHANCE workaround TCL deadlock issue */ |
52 | WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL | |
52 | WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL | |
53 | (1 << 2) | (1 << 3)); |
53 | (1 << 2) | (1 << 3)); |
54 | /* add idle wait as per freedesktop.org bug 24041 */ |
54 | /* add idle wait as per freedesktop.org bug 24041 */ |
55 | if (r100_gui_wait_for_idle(rdev)) { |
55 | if (r100_gui_wait_for_idle(rdev)) { |
56 | printk(KERN_WARNING "Failed to wait GUI idle while " |
56 | printk(KERN_WARNING "Failed to wait GUI idle while " |
57 | "programming pipes. Bad things might happen.\n"); |
57 | "programming pipes. Bad things might happen.\n"); |
58 | } |
58 | } |
59 | /* get max number of pipes */ |
59 | /* get max number of pipes */ |
60 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
60 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
62 | 62 | ||
63 | /* SE chips have 1 pipe */ |
63 | /* SE chips have 1 pipe */ |
64 | if ((rdev->pdev->device == 0x5e4c) || |
64 | if ((rdev->pdev->device == 0x5e4c) || |
65 | (rdev->pdev->device == 0x5e4f)) |
65 | (rdev->pdev->device == 0x5e4f)) |
66 | num_pipes = 1; |
66 | num_pipes = 1; |
67 | 67 | ||
68 | rdev->num_gb_pipes = num_pipes; |
68 | rdev->num_gb_pipes = num_pipes; |
69 | tmp = 0; |
69 | tmp = 0; |
70 | switch (num_pipes) { |
70 | switch (num_pipes) { |
71 | default: |
71 | default: |
72 | /* force to 1 pipe */ |
72 | /* force to 1 pipe */ |
73 | num_pipes = 1; |
73 | num_pipes = 1; |
74 | case 1: |
74 | case 1: |
75 | tmp = (0 << 1); |
75 | tmp = (0 << 1); |
76 | break; |
76 | break; |
77 | case 2: |
77 | case 2: |
78 | tmp = (3 << 1); |
78 | tmp = (3 << 1); |
79 | break; |
79 | break; |
80 | case 3: |
80 | case 3: |
81 | tmp = (6 << 1); |
81 | tmp = (6 << 1); |
82 | break; |
82 | break; |
83 | case 4: |
83 | case 4: |
84 | tmp = (7 << 1); |
84 | tmp = (7 << 1); |
85 | break; |
85 | break; |
86 | } |
86 | } |
87 | WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1); |
87 | WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1); |
88 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ |
88 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ |
89 | tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING; |
89 | tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING; |
90 | WREG32(R300_GB_TILE_CONFIG, tmp); |
90 | WREG32(R300_GB_TILE_CONFIG, tmp); |
91 | if (r100_gui_wait_for_idle(rdev)) { |
91 | if (r100_gui_wait_for_idle(rdev)) { |
92 | printk(KERN_WARNING "Failed to wait GUI idle while " |
92 | printk(KERN_WARNING "Failed to wait GUI idle while " |
93 | "programming pipes. Bad things might happen.\n"); |
93 | "programming pipes. Bad things might happen.\n"); |
94 | } |
94 | } |
95 | 95 | ||
96 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
96 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
97 | WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); |
97 | WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); |
98 | 98 | ||
99 | WREG32(R300_RB2D_DSTCACHE_MODE, |
99 | WREG32(R300_RB2D_DSTCACHE_MODE, |
100 | RREG32(R300_RB2D_DSTCACHE_MODE) | |
100 | RREG32(R300_RB2D_DSTCACHE_MODE) | |
101 | R300_DC_AUTOFLUSH_ENABLE | |
101 | R300_DC_AUTOFLUSH_ENABLE | |
102 | R300_DC_DC_DISABLE_IGNORE_PE); |
102 | R300_DC_DC_DISABLE_IGNORE_PE); |
103 | 103 | ||
104 | if (r100_gui_wait_for_idle(rdev)) { |
104 | if (r100_gui_wait_for_idle(rdev)) { |
105 | printk(KERN_WARNING "Failed to wait GUI idle while " |
105 | printk(KERN_WARNING "Failed to wait GUI idle while " |
106 | "programming pipes. Bad things might happen.\n"); |
106 | "programming pipes. Bad things might happen.\n"); |
107 | } |
107 | } |
108 | 108 | ||
109 | if (rdev->family == CHIP_RV530) { |
109 | if (rdev->family == CHIP_RV530) { |
110 | tmp = RREG32(RV530_GB_PIPE_SELECT2); |
110 | tmp = RREG32(RV530_GB_PIPE_SELECT2); |
111 | if ((tmp & 3) == 3) |
111 | if ((tmp & 3) == 3) |
112 | rdev->num_z_pipes = 2; |
112 | rdev->num_z_pipes = 2; |
113 | else |
113 | else |
114 | rdev->num_z_pipes = 1; |
114 | rdev->num_z_pipes = 1; |
115 | } else |
115 | } else |
116 | rdev->num_z_pipes = 1; |
116 | rdev->num_z_pipes = 1; |
117 | 117 | ||
118 | DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", |
118 | DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", |
119 | rdev->num_gb_pipes, rdev->num_z_pipes); |
119 | rdev->num_gb_pipes, rdev->num_z_pipes); |
120 | } |
120 | } |
121 | 121 | ||
122 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) |
122 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) |
123 | { |
123 | { |
124 | u32 r; |
124 | u32 r; |
125 | 125 | ||
126 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); |
126 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); |
127 | r = RREG32(R_0001FC_MC_IND_DATA); |
127 | r = RREG32(R_0001FC_MC_IND_DATA); |
128 | return r; |
128 | return r; |
129 | } |
129 | } |
130 | 130 | ||
131 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
131 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
132 | { |
132 | { |
133 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | |
133 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | |
134 | S_0001F8_MC_IND_WR_EN(1)); |
134 | S_0001F8_MC_IND_WR_EN(1)); |
135 | WREG32(R_0001FC_MC_IND_DATA, v); |
135 | WREG32(R_0001FC_MC_IND_DATA, v); |
136 | } |
136 | } |
137 | 137 | ||
/* Register the R420 debugfs files (RBBM state and pipe configuration).
 * Registration failures are logged but not fatal — debugfs is optional. */
static void r420_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	if (r420_debugfs_pipes_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
}
147 | 147 | ||
148 | static void r420_clock_resume(struct radeon_device *rdev) |
148 | static void r420_clock_resume(struct radeon_device *rdev) |
149 | { |
149 | { |
150 | u32 sclk_cntl; |
150 | u32 sclk_cntl; |
151 | 151 | ||
152 | if (radeon_dynclks != -1 && radeon_dynclks) |
152 | if (radeon_dynclks != -1 && radeon_dynclks) |
153 | radeon_atom_set_clock_gating(rdev, 1); |
153 | radeon_atom_set_clock_gating(rdev, 1); |
154 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
154 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
155 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
155 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
156 | if (rdev->family == CHIP_R420) |
156 | if (rdev->family == CHIP_R420) |
157 | sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); |
157 | sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); |
158 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); |
158 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); |
159 | } |
159 | } |
160 | 160 | ||
161 | static void r420_cp_errata_init(struct radeon_device *rdev) |
161 | static void r420_cp_errata_init(struct radeon_device *rdev) |
162 | { |
162 | { |
163 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
163 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
164 | 164 | ||
165 | /* RV410 and R420 can lock up if CP DMA to host memory happens |
165 | /* RV410 and R420 can lock up if CP DMA to host memory happens |
166 | * while the 2D engine is busy. |
166 | * while the 2D engine is busy. |
167 | * |
167 | * |
168 | * The proper workaround is to queue a RESYNC at the beginning |
168 | * The proper workaround is to queue a RESYNC at the beginning |
169 | * of the CP init, apparently. |
169 | * of the CP init, apparently. |
170 | */ |
170 | */ |
171 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); |
171 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); |
172 | radeon_ring_lock(rdev, ring, 8); |
172 | radeon_ring_lock(rdev, ring, 8); |
173 | radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); |
173 | radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); |
174 | radeon_ring_write(ring, rdev->config.r300.resync_scratch); |
174 | radeon_ring_write(ring, rdev->config.r300.resync_scratch); |
175 | radeon_ring_write(ring, 0xDEADBEEF); |
175 | radeon_ring_write(ring, 0xDEADBEEF); |
176 | radeon_ring_unlock_commit(rdev, ring); |
176 | radeon_ring_unlock_commit(rdev, ring); |
177 | } |
177 | } |
178 | 178 | ||
179 | static void r420_cp_errata_fini(struct radeon_device *rdev) |
179 | static void r420_cp_errata_fini(struct radeon_device *rdev) |
180 | { |
180 | { |
181 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
181 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
182 | 182 | ||
183 | /* Catch the RESYNC we dispatched all the way back, |
183 | /* Catch the RESYNC we dispatched all the way back, |
184 | * at the very beginning of the CP init. |
184 | * at the very beginning of the CP init. |
185 | */ |
185 | */ |
186 | radeon_ring_lock(rdev, ring, 8); |
186 | radeon_ring_lock(rdev, ring, 8); |
187 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
187 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
188 | radeon_ring_write(ring, R300_RB3D_DC_FINISH); |
188 | radeon_ring_write(ring, R300_RB3D_DC_FINISH); |
189 | radeon_ring_unlock_commit(rdev, ring); |
189 | radeon_ring_unlock_commit(rdev, ring); |
190 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); |
190 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); |
191 | } |
191 | } |
192 | 192 | ||
193 | static int r420_startup(struct radeon_device *rdev) |
193 | static int r420_startup(struct radeon_device *rdev) |
194 | { |
194 | { |
195 | int r; |
195 | int r; |
196 | 196 | ||
197 | /* set common regs */ |
197 | /* set common regs */ |
198 | r100_set_common_regs(rdev); |
198 | r100_set_common_regs(rdev); |
199 | /* program mc */ |
199 | /* program mc */ |
200 | r300_mc_program(rdev); |
200 | r300_mc_program(rdev); |
201 | /* Resume clock */ |
201 | /* Resume clock */ |
202 | r420_clock_resume(rdev); |
202 | r420_clock_resume(rdev); |
203 | /* Initialize GART (initialize after TTM so we can allocate |
203 | /* Initialize GART (initialize after TTM so we can allocate |
204 | * memory through TTM but finalize after TTM) */ |
204 | * memory through TTM but finalize after TTM) */ |
205 | if (rdev->flags & RADEON_IS_PCIE) { |
205 | if (rdev->flags & RADEON_IS_PCIE) { |
206 | r = rv370_pcie_gart_enable(rdev); |
206 | r = rv370_pcie_gart_enable(rdev); |
207 | if (r) |
207 | if (r) |
208 | return r; |
208 | return r; |
209 | } |
209 | } |
210 | if (rdev->flags & RADEON_IS_PCI) { |
210 | if (rdev->flags & RADEON_IS_PCI) { |
211 | r = r100_pci_gart_enable(rdev); |
211 | r = r100_pci_gart_enable(rdev); |
212 | if (r) |
212 | if (r) |
213 | return r; |
213 | return r; |
214 | } |
214 | } |
215 | r420_pipes_init(rdev); |
215 | r420_pipes_init(rdev); |
216 | 216 | ||
217 | /* allocate wb buffer */ |
217 | /* allocate wb buffer */ |
218 | r = radeon_wb_init(rdev); |
218 | r = radeon_wb_init(rdev); |
219 | if (r) |
219 | if (r) |
220 | return r; |
220 | return r; |
- | 221 | ||
- | 222 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
|
- | 223 | if (r) { |
|
- | 224 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
|
- | 225 | return r; |
|
- | 226 | } |
|
221 | 227 | ||
222 | /* Enable IRQ */ |
228 | /* Enable IRQ */ |
223 | r100_irq_set(rdev); |
229 | r100_irq_set(rdev); |
224 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
230 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
225 | /* 1M ring buffer */ |
231 | /* 1M ring buffer */ |
226 | r = r100_cp_init(rdev, 1024 * 1024); |
232 | r = r100_cp_init(rdev, 1024 * 1024); |
227 | if (r) { |
233 | if (r) { |
228 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
234 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
229 | return r; |
235 | return r; |
230 | } |
236 | } |
231 | r420_cp_errata_init(rdev); |
237 | r420_cp_errata_init(rdev); |
232 | 238 | ||
233 | r = radeon_ib_pool_init(rdev); |
239 | r = radeon_ib_pool_init(rdev); |
234 | if (r) { |
240 | if (r) { |
235 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
241 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
236 | return r; |
242 | return r; |
237 | } |
243 | } |
238 | 244 | ||
239 | return 0; |
245 | return 0; |
240 | } |
246 | } |
241 | 247 | ||
242 | 248 | ||
243 | 249 | ||
244 | 250 | ||
245 | 251 | ||
246 | 252 | ||
247 | int r420_init(struct radeon_device *rdev) |
253 | int r420_init(struct radeon_device *rdev) |
248 | { |
254 | { |
249 | int r; |
255 | int r; |
250 | 256 | ||
251 | /* Initialize scratch registers */ |
257 | /* Initialize scratch registers */ |
252 | radeon_scratch_init(rdev); |
258 | radeon_scratch_init(rdev); |
253 | /* Initialize surface registers */ |
259 | /* Initialize surface registers */ |
254 | radeon_surface_init(rdev); |
260 | radeon_surface_init(rdev); |
255 | /* TODO: disable VGA need to use VGA request */ |
261 | /* TODO: disable VGA need to use VGA request */ |
256 | /* restore some register to sane defaults */ |
262 | /* restore some register to sane defaults */ |
257 | r100_restore_sanity(rdev); |
263 | r100_restore_sanity(rdev); |
258 | /* BIOS*/ |
264 | /* BIOS*/ |
259 | if (!radeon_get_bios(rdev)) { |
265 | if (!radeon_get_bios(rdev)) { |
260 | if (ASIC_IS_AVIVO(rdev)) |
266 | if (ASIC_IS_AVIVO(rdev)) |
261 | return -EINVAL; |
267 | return -EINVAL; |
262 | } |
268 | } |
263 | if (rdev->is_atom_bios) { |
269 | if (rdev->is_atom_bios) { |
264 | r = radeon_atombios_init(rdev); |
270 | r = radeon_atombios_init(rdev); |
265 | if (r) { |
271 | if (r) { |
266 | return r; |
272 | return r; |
267 | } |
273 | } |
268 | } else { |
274 | } else { |
269 | r = radeon_combios_init(rdev); |
275 | r = radeon_combios_init(rdev); |
270 | if (r) { |
276 | if (r) { |
271 | return r; |
277 | return r; |
272 | } |
278 | } |
273 | } |
279 | } |
274 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
280 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
275 | if (radeon_asic_reset(rdev)) { |
281 | if (radeon_asic_reset(rdev)) { |
276 | dev_warn(rdev->dev, |
282 | dev_warn(rdev->dev, |
277 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
283 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
278 | RREG32(R_000E40_RBBM_STATUS), |
284 | RREG32(R_000E40_RBBM_STATUS), |
279 | RREG32(R_0007C0_CP_STAT)); |
285 | RREG32(R_0007C0_CP_STAT)); |
280 | } |
286 | } |
281 | /* check if cards are posted or not */ |
287 | /* check if cards are posted or not */ |
282 | if (radeon_boot_test_post_card(rdev) == false) |
288 | if (radeon_boot_test_post_card(rdev) == false) |
283 | return -EINVAL; |
289 | return -EINVAL; |
284 | 290 | ||
285 | /* Initialize clocks */ |
291 | /* Initialize clocks */ |
286 | radeon_get_clock_info(rdev->ddev); |
292 | radeon_get_clock_info(rdev->ddev); |
287 | /* initialize AGP */ |
293 | /* initialize AGP */ |
288 | if (rdev->flags & RADEON_IS_AGP) { |
294 | if (rdev->flags & RADEON_IS_AGP) { |
289 | r = radeon_agp_init(rdev); |
295 | r = radeon_agp_init(rdev); |
290 | if (r) { |
296 | if (r) { |
291 | radeon_agp_disable(rdev); |
297 | radeon_agp_disable(rdev); |
292 | } |
298 | } |
293 | } |
299 | } |
294 | /* initialize memory controller */ |
300 | /* initialize memory controller */ |
295 | r300_mc_init(rdev); |
301 | r300_mc_init(rdev); |
296 | r420_debugfs(rdev); |
302 | r420_debugfs(rdev); |
297 | /* Fence driver */ |
303 | /* Fence driver */ |
298 | r = radeon_fence_driver_init(rdev); |
304 | r = radeon_fence_driver_init(rdev); |
299 | if (r) { |
305 | if (r) { |
300 | return r; |
306 | return r; |
301 | } |
307 | } |
302 | r = radeon_irq_kms_init(rdev); |
308 | r = radeon_irq_kms_init(rdev); |
303 | if (r) { |
309 | if (r) { |
304 | return r; |
310 | return r; |
305 | } |
311 | } |
306 | /* Memory manager */ |
312 | /* Memory manager */ |
307 | r = radeon_bo_init(rdev); |
313 | r = radeon_bo_init(rdev); |
308 | if (r) { |
314 | if (r) { |
309 | return r; |
315 | return r; |
310 | } |
316 | } |
311 | if (rdev->family == CHIP_R420) |
317 | if (rdev->family == CHIP_R420) |
312 | r100_enable_bm(rdev); |
318 | r100_enable_bm(rdev); |
313 | 319 | ||
314 | if (rdev->flags & RADEON_IS_PCIE) { |
320 | if (rdev->flags & RADEON_IS_PCIE) { |
315 | r = rv370_pcie_gart_init(rdev); |
321 | r = rv370_pcie_gart_init(rdev); |
316 | if (r) |
322 | if (r) |
317 | return r; |
323 | return r; |
318 | } |
324 | } |
319 | if (rdev->flags & RADEON_IS_PCI) { |
325 | if (rdev->flags & RADEON_IS_PCI) { |
320 | r = r100_pci_gart_init(rdev); |
326 | r = r100_pci_gart_init(rdev); |
321 | if (r) |
327 | if (r) |
322 | return r; |
328 | return r; |
323 | } |
329 | } |
324 | r420_set_reg_safe(rdev); |
330 | r420_set_reg_safe(rdev); |
325 | 331 | ||
326 | rdev->accel_working = true; |
332 | rdev->accel_working = true; |
327 | r = r420_startup(rdev); |
333 | r = r420_startup(rdev); |
328 | if (r) { |
334 | if (r) { |
329 | /* Somethings want wront with the accel init stop accel */ |
335 | /* Somethings want wront with the accel init stop accel */ |
330 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
336 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
331 | if (rdev->flags & RADEON_IS_PCIE) |
337 | if (rdev->flags & RADEON_IS_PCIE) |
332 | rv370_pcie_gart_fini(rdev); |
338 | rv370_pcie_gart_fini(rdev); |
333 | if (rdev->flags & RADEON_IS_PCI) |
339 | if (rdev->flags & RADEON_IS_PCI) |
334 | r100_pci_gart_fini(rdev); |
340 | r100_pci_gart_fini(rdev); |
335 | rdev->accel_working = false; |
341 | rdev->accel_working = false; |
336 | } |
342 | } |
337 | return 0; |
343 | return 0; |
338 | } |
344 | } |
339 | 345 | ||
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
/* Dump the current pipe-selection registers for debugfs consumers. */
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(R400_GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(R300_GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif
364 | 370 | ||
/* Register the pipes-info debugfs file; no-op (success) when debugfs is
 * compiled out. */
int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
	return 0;
#endif
}
378 | }><>><>><>><>><>><>><> |