/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"

int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
    unsigned i;
    uint32_t tmp;

    for (i = 0; i < rdev->usec_timeout; i++) {
        /* read MC_STATUS */
        tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
        if (G_000090_MC_SYSTEM_IDLE(tmp))
            return 0;
        udelay(1);
    }
    return -1;
}

static void rs690_gpu_init(struct radeon_device *rdev)
{
    /* FIXME: is this correct ? */
    r420_pipes_init(rdev);
    if (rs690_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait MC idle while "
               "programming pipes. Bad things might happen.\n");
    }
}

union igp_info {
    struct _ATOM_INTEGRATED_SYSTEM_INFO info;
    struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
};

void rs690_pm_info(struct radeon_device *rdev)
{
    int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
    union igp_info *info;
    uint16_t data_offset;
    uint8_t frev, crev;
    fixed20_12 tmp;

    if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
                               &frev, &crev, &data_offset)) {
        info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);

        /* Get various system information from the BIOS */
        switch (crev) {
        case 1:
            tmp.full = dfixed_const(100);
            rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
            rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
            if (le16_to_cpu(info->info.usK8MemoryClock))
                rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
            else if (rdev->clock.default_mclk) {
                rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
                rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
            } else
                rdev->pm.igp_system_mclk.full = dfixed_const(400);
            rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
            rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
            break;
        case 2:
            tmp.full = dfixed_const(100);
            rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
            rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
            if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
                rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
            else if (rdev->clock.default_mclk)
                rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
            else
                rdev->pm.igp_system_mclk.full = dfixed_const(66700);
            rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
            rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
            rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
            rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
            break;
        default:
            /* We assume the slowest possible clock, i.e. the worst case */
            rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
            rdev->pm.igp_system_mclk.full = dfixed_const(200);
            rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
            rdev->pm.igp_ht_link_width.full = dfixed_const(8);
            DRM_ERROR("No integrated system info for your GPU, using safe default\n");
            break;
        }
    } else {
        /* We assume the slowest possible clock, i.e. the worst case */
        rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
        rdev->pm.igp_system_mclk.full = dfixed_const(200);
        rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
        rdev->pm.igp_ht_link_width.full = dfixed_const(8);
        DRM_ERROR("No integrated system info for your GPU, using safe default\n");
    }
    /* Compute various bandwidths */
    /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
    tmp.full = dfixed_const(4);
    rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
    /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
     *              = ht_clk * ht_width / 5
     */
    tmp.full = dfixed_const(5);
    rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
                                            rdev->pm.igp_ht_link_width);
    rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
    if (tmp.full < rdev->pm.max_bandwidth.full) {
        /* HT link is a limiting factor */
        rdev->pm.max_bandwidth.full = tmp.full;
    }
    /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
     *                    = (sideport_clk * 14) / 10
     */
    tmp.full = dfixed_const(14);
    rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
    tmp.full = dfixed_const(10);
    rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}

static void rs690_mc_init(struct radeon_device *rdev)
{
    u64 base;
    uint32_t h_addr, l_addr;
    unsigned long long k8_addr;

    rs400_gart_adjust_size(rdev);
    rdev->mc.vram_is_ddr = true;
    rdev->mc.vram_width = 128;
    rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
    rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
    rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
    rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
    rdev->mc.visible_vram_size = rdev->mc.aper_size;
    base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
    base = G_000100_MC_FB_START(base) << 16;
    rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
    /* Some boards seem to be configured for 128MB of sideport memory,
     * but really only have 64MB.  Just skip the sideport and use
     * UMA memory.
     */
    if (rdev->mc.igp_sideport_enabled &&
        (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
        base += 128 * 1024 * 1024;
        rdev->mc.real_vram_size -= 128 * 1024 * 1024;
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
    }

    /* Use K8 direct mapping for fast fb access. */
    rdev->fastfb_working = false;
    h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
    l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
    k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
    if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
    {
        /* FastFB shall be used with UMA memory.  Here it is simply disabled
         * when sideport memory is present.
         */
        if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
            DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
                     (unsigned long long)rdev->mc.aper_base, k8_addr);
            rdev->mc.aper_base = (resource_size_t)k8_addr;
            rdev->fastfb_working = true;
        }
    }

    rs690_pm_info(rdev);
    radeon_vram_location(rdev, &rdev->mc, base);
    rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
    radeon_gtt_location(rdev, &rdev->mc);
    radeon_update_bandwidth_info(rdev);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
                              struct drm_display_mode *mode1,
                              struct drm_display_mode *mode2)
{
    u32 tmp;

    /*
     * Line Buffer Setup
     * There is a single line buffer shared by both display controllers.
     * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared
     * between the display controllers.  The partitioning can either be done
     * manually or via one of four preset allocations specified in bits 1:0:
     *  0 - line buffer is divided in half and shared between crtc
     *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
     *  2 - D1 gets the whole buffer
     *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
     * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
     * allocation mode.  In manual allocation mode, D1 always starts at 0,
     * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
     */
    tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
    tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
    /* auto */
    if (mode1 && mode2) {
        if (mode1->hdisplay > mode2->hdisplay) {
            if (mode1->hdisplay > 2560)
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
            else
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
        } else if (mode2->hdisplay > mode1->hdisplay) {
            if (mode2->hdisplay > 2560)
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
            else
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
        } else
            tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
    } else if (mode1) {
        tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
    } else if (mode2) {
        tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
    }
    WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}

struct rs690_watermark {
    u32 lb_request_fifo_depth;
    fixed20_12 num_line_pair;
    fixed20_12 estimated_width;
    fixed20_12 worst_case_latency;
    fixed20_12 consumption_rate;
    fixed20_12 active_time;
    fixed20_12 dbpp;
    fixed20_12 priority_mark_max;
    fixed20_12 priority_mark;
    fixed20_12 sclk;
};

static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
                                         struct radeon_crtc *crtc,
                                         struct rs690_watermark *wm,
                                         bool low)
{
    struct drm_display_mode *mode = &crtc->base.mode;
    fixed20_12 a, b, c;
    fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
    fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
    fixed20_12 sclk, core_bandwidth, max_bandwidth;
    u32 selected_sclk;

    if (!crtc->base.enabled) {
        /* FIXME: wouldn't it be better to set priority mark to maximum */
        wm->lb_request_fifo_depth = 4;
        return;
    }

    if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
        (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
        selected_sclk = radeon_dpm_get_sclk(rdev, low);
    else
        selected_sclk = rdev->pm.current_sclk;

    /* sclk in Mhz */
    a.full = dfixed_const(100);
    sclk.full = dfixed_const(selected_sclk);
    sclk.full = dfixed_div(sclk, a);

    /* core_bandwidth = sclk(Mhz) * 16 */
    a.full = dfixed_const(16);
    core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);

    if (crtc->vsc.full > dfixed_const(2))
        wm->num_line_pair.full = dfixed_const(2);
    else
        wm->num_line_pair.full = dfixed_const(1);

    b.full = dfixed_const(mode->crtc_hdisplay);
    c.full = dfixed_const(256);
    a.full = dfixed_div(b, c);
    request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
    request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
    if (a.full < dfixed_const(4)) {
        wm->lb_request_fifo_depth = 4;
    } else {
        wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
    }

    /* Determine consumption rate
     *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
     *  vtaps = number of vertical taps,
     *  vsc = vertical scaling ratio, defined as source/destination
     *  hsc = horizontal scaling ratio, defined as source/destination
     */
    a.full = dfixed_const(mode->clock);
    b.full = dfixed_const(1000);
    a.full = dfixed_div(a, b);
    pclk.full = dfixed_div(b, a);
    if (crtc->rmx_type != RMX_OFF) {
        b.full = dfixed_const(2);
        if (crtc->vsc.full > b.full)
            b.full = crtc->vsc.full;
        b.full = dfixed_mul(b, crtc->hsc);
        c.full = dfixed_const(2);
        b.full = dfixed_div(b, c);
        consumption_time.full = dfixed_div(pclk, b);
    } else {
        consumption_time.full = pclk.full;
    }
    a.full = dfixed_const(1);
    wm->consumption_rate.full = dfixed_div(a, consumption_time);


    /* Determine line time
     *  LineTime = total time for one line of display
     *  htotal = total number of horizontal pixels
     *  pclk = pixel clock period(ns)
     */
    a.full = dfixed_const(crtc->base.mode.crtc_htotal);
    line_time.full = dfixed_mul(a, pclk);

    /* Determine active time
     *  ActiveTime = time of active region of display within one line,
     *  hactive = total number of horizontal active pixels
     *  htotal = total number of horizontal pixels
     */
    a.full = dfixed_const(crtc->base.mode.crtc_htotal);
    b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
    wm->active_time.full = dfixed_mul(line_time, b);
    wm->active_time.full = dfixed_div(wm->active_time, a);

    /* Maximum bandwidth is the minimum bandwidth of all components */
    max_bandwidth = core_bandwidth;
    if (rdev->mc.igp_sideport_enabled) {
        if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
            rdev->pm.sideport_bandwidth.full)
            max_bandwidth = rdev->pm.sideport_bandwidth;
        read_delay_latency.full = dfixed_const(370 * 800);
        a.full = dfixed_const(1000);
        b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
        read_delay_latency.full = dfixed_div(read_delay_latency, b);
        read_delay_latency.full = dfixed_mul(read_delay_latency, a);
    } else {
        if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
            rdev->pm.k8_bandwidth.full)
            max_bandwidth = rdev->pm.k8_bandwidth;
        if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
            rdev->pm.ht_bandwidth.full)
            max_bandwidth = rdev->pm.ht_bandwidth;
        read_delay_latency.full = dfixed_const(5000);
    }

    /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
    a.full = dfixed_const(16);
    sclk.full = dfixed_mul(max_bandwidth, a);
    a.full = dfixed_const(1000);
    sclk.full = dfixed_div(a, sclk);
    /* Determine chunk time
     * ChunkTime = the time it takes the DCP to send one chunk of data
     * to the LB which consists of pipeline delay and inter chunk gap
     * sclk = system clock(ns)
     */
    a.full = dfixed_const(256 * 13);
    chunk_time.full = dfixed_mul(sclk, a);
    a.full = dfixed_const(10);
    chunk_time.full = dfixed_div(chunk_time, a);

    /* Determine the worst case latency
     * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
     * WorstCaseLatency = worst case time from urgent to when the MC starts
     *                    to return data
     * READ_DELAY_IDLE_MAX = constant of 1us
     * ChunkTime = time it takes the DCP to send one chunk of data to the LB
     *             which consists of pipeline delay and inter chunk gap
     */
    if (dfixed_trunc(wm->num_line_pair) > 1) {
        a.full = dfixed_const(3);
        wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
        wm->worst_case_latency.full += read_delay_latency.full;
    } else {
        a.full = dfixed_const(2);
        wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
        wm->worst_case_latency.full += read_delay_latency.full;
    }

    /* Determine the tolerable latency
     * TolerableLatency = Any given request has only 1 line time
     *                    for the data to be returned
     * LBRequestFifoDepth = Number of chunk requests the LB can
     *                      put into the request FIFO for a display
     * LineTime = total time for one line of display
     * ChunkTime = the time it takes the DCP to send one chunk
     *             of data to the LB which consists of
     *             pipeline delay and inter chunk gap
     */
    if ((2 + wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
        tolerable_latency.full = line_time.full;
    } else {
        tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
        tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
        tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
        tolerable_latency.full = line_time.full - tolerable_latency.full;
    }
    /* We assume worst case 32bits (4 bytes) */
    wm->dbpp.full = dfixed_const(4 * 8);

    /* Determine the maximum priority mark
     *  width = viewport width in pixels
     */
    a.full = dfixed_const(16);
    wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
    wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
    wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

    /* Determine estimated width */
    estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
    estimated_width.full = dfixed_div(estimated_width, consumption_time);
    if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
        wm->priority_mark.full = dfixed_const(10);
    } else {
        a.full = dfixed_const(16);
        wm->priority_mark.full = dfixed_div(estimated_width, a);
        wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
        wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
    }
}

static void rs690_compute_mode_priority(struct radeon_device *rdev,
                                        struct rs690_watermark *wm0,
                                        struct rs690_watermark *wm1,
                                        struct drm_display_mode *mode0,
                                        struct drm_display_mode *mode1,
                                        u32 *d1mode_priority_a_cnt,
                                        u32 *d2mode_priority_a_cnt)
{
    fixed20_12 priority_mark02, priority_mark12, fill_rate;
    fixed20_12 a, b;

    *d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
    *d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);

    if (mode0 && mode1) {
        if (dfixed_trunc(wm0->dbpp) > 64)
            a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
        else
            a.full = wm0->num_line_pair.full;
        if (dfixed_trunc(wm1->dbpp) > 64)
            b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
        else
            b.full = wm1->num_line_pair.full;
        a.full += b.full;
        fill_rate.full = dfixed_div(wm0->sclk, a);
        if (wm0->consumption_rate.full > fill_rate.full) {
            b.full = wm0->consumption_rate.full - fill_rate.full;
            b.full = dfixed_mul(b, wm0->active_time);
            a.full = dfixed_mul(wm0->worst_case_latency,
                                wm0->consumption_rate);
            a.full = a.full + b.full;
            b.full = dfixed_const(16 * 1000);
            priority_mark02.full = dfixed_div(a, b);
        } else {
            a.full = dfixed_mul(wm0->worst_case_latency,
                                wm0->consumption_rate);
            b.full = dfixed_const(16 * 1000);
            priority_mark02.full = dfixed_div(a, b);
        }
        if (wm1->consumption_rate.full > fill_rate.full) {
            b.full = wm1->consumption_rate.full - fill_rate.full;
            b.full = dfixed_mul(b, wm1->active_time);
            a.full = dfixed_mul(wm1->worst_case_latency,
                                wm1->consumption_rate);
            a.full = a.full + b.full;
            b.full = dfixed_const(16 * 1000);
            priority_mark12.full = dfixed_div(a, b);
        } else {
            a.full = dfixed_mul(wm1->worst_case_latency,
                                wm1->consumption_rate);
            b.full = dfixed_const(16 * 1000);
            priority_mark12.full = dfixed_div(a, b);
        }
        if (wm0->priority_mark.full > priority_mark02.full)
            priority_mark02.full = wm0->priority_mark.full;
        if (wm0->priority_mark_max.full > priority_mark02.full)
            priority_mark02.full = wm0->priority_mark_max.full;
        if (wm1->priority_mark.full > priority_mark12.full)
            priority_mark12.full = wm1->priority_mark.full;
        if (wm1->priority_mark_max.full > priority_mark12.full)
            priority_mark12.full = wm1->priority_mark_max.full;
        *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
        *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
        if (rdev->disp_priority == 2) {
            *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
            *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
        }
    } else if (mode0) {
        if (dfixed_trunc(wm0->dbpp) > 64)
            a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
        else
            a.full = wm0->num_line_pair.full;
        fill_rate.full = dfixed_div(wm0->sclk, a);
        if (wm0->consumption_rate.full > fill_rate.full) {
            b.full = wm0->consumption_rate.full - fill_rate.full;
            b.full = dfixed_mul(b, wm0->active_time);
            a.full = dfixed_mul(wm0->worst_case_latency,
                                wm0->consumption_rate);
            a.full = a.full + b.full;
            b.full = dfixed_const(16 * 1000);
            priority_mark02.full = dfixed_div(a, b);
        } else {
            a.full = dfixed_mul(wm0->worst_case_latency,
                                wm0->consumption_rate);
            b.full = dfixed_const(16 * 1000);
            priority_mark02.full = dfixed_div(a, b);
        }
        if (wm0->priority_mark.full > priority_mark02.full)
            priority_mark02.full = wm0->priority_mark.full;
        if (wm0->priority_mark_max.full > priority_mark02.full)
            priority_mark02.full = wm0->priority_mark_max.full;
        *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
        if (rdev->disp_priority == 2)
            *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
    } else if (mode1) {
        if (dfixed_trunc(wm1->dbpp) > 64)
            a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
        else
            a.full = wm1->num_line_pair.full;
        fill_rate.full = dfixed_div(wm1->sclk, a);
        if (wm1->consumption_rate.full > fill_rate.full) {
            b.full = wm1->consumption_rate.full - fill_rate.full;
            b.full = dfixed_mul(b, wm1->active_time);
            a.full = dfixed_mul(wm1->worst_case_latency,
                                wm1->consumption_rate);
            a.full = a.full + b.full;
            b.full = dfixed_const(16 * 1000);
            priority_mark12.full = dfixed_div(a, b);
        } else {
            a.full = dfixed_mul(wm1->worst_case_latency,
                                wm1->consumption_rate);
            b.full = dfixed_const(16 * 1000);
            priority_mark12.full = dfixed_div(a, b);
        }
        if (wm1->priority_mark.full > priority_mark12.full)
            priority_mark12.full = wm1->priority_mark.full;
        if (wm1->priority_mark_max.full > priority_mark12.full)
            priority_mark12.full = wm1->priority_mark_max.full;
        *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
        if (rdev->disp_priority == 2)
            *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
    }
}

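/* Called on mode sets: optionally raises the MC display latency priority,
 * programs the shared line buffer split, computes high and low watermarks
 * for both CRTCs and writes the resulting D1/D2 PRIORITY_A/B counts.
 */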
void rs690_bandwidth_update(struct radeon_device *rdev)
{
    struct drm_display_mode *mode0 = NULL;
    struct drm_display_mode *mode1 = NULL;
    struct rs690_watermark wm0_high, wm0_low;
    struct rs690_watermark wm1_high, wm1_low;
    u32 tmp;
    u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
    u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;

    radeon_update_display_priority(rdev);

    if (rdev->mode_info.crtcs[0]->base.enabled)
        mode0 = &rdev->mode_info.crtcs[0]->base.mode;
    if (rdev->mode_info.crtcs[1]->base.enabled)
        mode1 = &rdev->mode_info.crtcs[1]->base.mode;
    /*
     * Set display0/1 priority up in the memory controller for
     * modes if the user specifies HIGH for displaypriority
     * option.
     */
    if ((rdev->disp_priority == 2) &&
        ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
        tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
        tmp &= C_000104_MC_DISP0R_INIT_LAT;
        tmp &= C_000104_MC_DISP1R_INIT_LAT;
        if (mode0)
            tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
        if (mode1)
            tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
        WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
    }
    rs690_line_buffer_adjust(rdev, mode0, mode1);

    if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
        WREG32(R_006C9C_DCP_CONTROL, 0);
    if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
        WREG32(R_006C9C_DCP_CONTROL, 2);

    rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
    rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);

    rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
    rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);

    tmp = (wm0_high.lb_request_fifo_depth - 1);
    tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
    WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);

    rs690_compute_mode_priority(rdev,
                                &wm0_high, &wm1_high,
                                mode0, mode1,
                                &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
    rs690_compute_mode_priority(rdev,
                                &wm0_low, &wm1_low,
                                mode0, mode1,
                                &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);

    WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
    WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
    WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
    WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
}

uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
    unsigned long flags;
    uint32_t r;

    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
    WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
    r = RREG32(R_00007C_MC_DATA);
    WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
    return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    unsigned long flags;

    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
    WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
           S_000078_MC_IND_WR_EN(1));
    WREG32(R_00007C_MC_DATA, v);
    WREG32(R_000078_MC_INDEX, 0x7F);
    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

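/* Reprogram the framebuffer location in the memory controller: stop all MC
 * clients, wait for idle, write MCCFG_FB_LOCATION and HDP_FB_LOCATION, then
 * resume the saved client state.
 */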
static void rs690_mc_program(struct radeon_device *rdev)
{
    struct rv515_mc_save save;

    /* Stops all mc clients */
    rv515_mc_stop(rdev, &save);

    /* Wait for mc idle */
    if (rs690_mc_wait_for_idle(rdev))
        dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
    /* Program MC, should be a 32bits limited address space */
    WREG32_MC(R_000100_MCCFG_FB_LOCATION,
              S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
              S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
    WREG32(R_000134_HDP_FB_LOCATION,
           S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

    rv515_mc_resume(rdev, &save);
}

static int rs690_startup(struct radeon_device *rdev)
{
    int r;

    rs690_mc_program(rdev);
    /* Resume clock */
    rv515_clock_startup(rdev);
    /* Initialize GPU configuration (# pipes, ...) */
    rs690_gpu_init(rdev);
    /* Initialize GART (initialize after TTM so we can allocate
     * memory through TTM but finalize after TTM) */
    r = rs400_gart_enable(rdev);
    if (r)
        return r;

    /* allocate wb buffer */
    r = radeon_wb_init(rdev);
    if (r)
        return r;

    r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
        return r;
    }

    /* Enable IRQ */
    if (!rdev->irq.installed) {
        r = radeon_irq_kms_init(rdev);
        if (r)
            return r;
    }

    rs600_irq_set(rdev);
    rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
    /* 1M ring buffer */
    r = r100_cp_init(rdev, 1024 * 1024);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
        return r;
    }

    r = radeon_ib_pool_init(rdev);
    if (r) {
        dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
        return r;
    }

    r = r600_audio_init(rdev);
    if (r) {
        dev_err(rdev->dev, "failed initializing audio\n");
        return r;
    }

    return 0;
}


int rs690_init(struct radeon_device *rdev)
{
    int r;

    /* Disable VGA */
    rv515_vga_render_disable(rdev);
    /* Initialize scratch registers */
    radeon_scratch_init(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);
    /* restore some registers to sane defaults */
    r100_restore_sanity(rdev);
    /* TODO: disable VGA need to use VGA request */
    /* BIOS */
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    if (rdev->is_atom_bios) {
        r = radeon_atombios_init(rdev);
        if (r)
            return r;
    } else {
        dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
        return -EINVAL;
    }
    /* Reset gpu before posting otherwise ATOM will enter infinite loop */
    if (radeon_asic_reset(rdev)) {
        dev_warn(rdev->dev,
                 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                 RREG32(R_000E40_RBBM_STATUS),
                 RREG32(R_0007C0_CP_STAT));
    }
    /* check if cards are posted or not */
    if (radeon_boot_test_post_card(rdev) == false)
        return -EINVAL;

    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);
    /* initialize memory controller */
    rs690_mc_init(rdev);
    rv515_debugfs(rdev);
    /* Fence driver */
    r = radeon_fence_driver_init(rdev);
    if (r)
        return r;
    /* Memory manager */
    r = radeon_bo_init(rdev);
    if (r)
        return r;
    r = rs400_gart_init(rdev);
    if (r)
        return r;
    rs600_set_safe_registers(rdev);

    /* Initialize power management */
    radeon_pm_init(rdev);

    rdev->accel_working = true;
    r = rs690_startup(rdev);
    if (r) {
        /* Something went wrong with the accel init, so stop accel */
        dev_err(rdev->dev, "Disabling GPU acceleration\n");
        // r100_cp_fini(rdev);
        // r100_wb_fini(rdev);
        // r100_ib_fini(rdev);
        rs400_gart_fini(rdev);
        // radeon_irq_kms_fini(rdev);
        rdev->accel_working = false;
    }
    return 0;
}