Rev 5271 | Rev 6104 ||
---|---|---|
1 | 1 | /* |
2 | 2 |  * Copyright 2008 Advanced Micro Devices, Inc. |
3 | 3 |  * Copyright 2008 Red Hat Inc. |
4 | 4 |  * Copyright 2009 Jerome Glisse. |
5 | 5 |  * |
6 | 6 |  * Permission is hereby granted, free of charge, to any person obtaining a |
7 | 7 |  * copy of this software and associated documentation files (the "Software"), |
8 | 8 |  * to deal in the Software without restriction, including without limitation |
9 | 9 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | 10 |  * and/or sell copies of the Software, and to permit persons to whom the |
11 | 11 |  * Software is furnished to do so, subject to the following conditions: |
12 | 12 |  * |
13 | 13 |  * The above copyright notice and this permission notice shall be included in |
14 | 14 |  * all copies or substantial portions of the Software. |
15 | 15 |  * |
16 | 16 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | 17 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | 18 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | 19 |  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | 20 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | 21 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | 22 |  * OTHER DEALINGS IN THE SOFTWARE. |
23 | 23 |  * |
24 | 24 |  * Authors: Dave Airlie |
25 | 25 |  *          Alex Deucher |
26 | 26 |  *          Jerome Glisse |
27 | 27 |  */ |
28 | 28 | #include <drm/drmP.h> |
29 | 29 | #include "radeon.h" |
30 | 30 | #include "radeon_asic.h" |
- | 31 | #include "radeon_audio.h" |
31 | 32 | #include "atom.h" |
32 | 33 | #include "rs690d.h" |
33 | 34 | ||
34 | 35 | int rs690_mc_wait_for_idle(struct radeon_device *rdev) |
35 | 36 | { |
36 | 37 |     unsigned i; |
37 | 38 |     uint32_t tmp; |
38 | 39 | ||
39 | 40 |     for (i = 0; i < rdev->usec_timeout; i++) { |
40 | 41 |         /* read MC_STATUS */ |
41 | 42 |         tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); |
42 | 43 |         if (G_000090_MC_SYSTEM_IDLE(tmp)) |
43 | 44 |             return 0; |
44 | 45 |         udelay(1); |
45 | 46 |     } |
46 | 47 |     return -1; |
47 | 48 | } |
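The S_/G_/C_ accessors used here come from rs690d.h. For a single-bit field such as MC_SYSTEM_IDLE they follow the usual radeon register-header pattern; the shift and mask below are an illustrative guess, not copied from the header:

    #define S_000090_MC_SYSTEM_IDLE(x)  (((x) & 0x1) << 0)  /* pack the field */
    #define G_000090_MC_SYSTEM_IDLE(x)  (((x) >> 0) & 0x1)  /* extract the field */
    #define C_000090_MC_SYSTEM_IDLE     0xFFFFFFFE          /* mask to clear it */

The loop polls that bit once per microsecond for up to rdev->usec_timeout iterations and returns -1 if the memory controller never goes idle.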
48 | 49 | ||
49 | 50 | static void rs690_gpu_init(struct radeon_device *rdev) |
50 | 51 | { |
51 | 52 |     /* FIXME: is this correct ? */ |
52 | 53 |     r420_pipes_init(rdev); |
53 | 54 |     if (rs690_mc_wait_for_idle(rdev)) { |
54 | 55 |         printk(KERN_WARNING "Failed to wait MC idle while " |
55 | 56 |                "programming pipes. Bad things might happen.\n"); |
56 | 57 |     } |
57 | 58 | } |
58 | 59 | ||
59 | 60 | union igp_info { |
60 | 61 |     struct _ATOM_INTEGRATED_SYSTEM_INFO info; |
61 | 62 |     struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2; |
62 | 63 | }; |
63 | 64 | ||
64 | 65 | void rs690_pm_info(struct radeon_device *rdev) |
65 | 66 | { |
66 | 67 |     int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
67 | 68 |     union igp_info *info; |
68 | 69 |     uint16_t data_offset; |
69 | 70 |     uint8_t frev, crev; |
70 | 71 |     fixed20_12 tmp; |
71 | 72 | ||
72 | 73 |     if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, |
73 | 74 |                                &frev, &crev, &data_offset)) { |
74 | 75 |         info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset); |
75 | 76 | ||
76 | 77 |         /* Get various system information from the BIOS */ |
77 | 78 |         switch (crev) { |
78 | 79 |         case 1: |
79 | 80 |             tmp.full = dfixed_const(100); |
80 | 81 |             rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock)); |
81 | 82 |             rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
82 | 83 |             if (le16_to_cpu(info->info.usK8MemoryClock)) |
83 | 84 |                 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
84 | 85 |             else if (rdev->clock.default_mclk) { |
85 | 86 |                 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
86 | 87 |                 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
87 | 88 |             } else |
88 | 89 |                 rdev->pm.igp_system_mclk.full = dfixed_const(400); |
89 | 90 |             rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); |
90 | 91 |             rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); |
91 | 92 |             break; |
92 | 93 |         case 2: |
93 | 94 |             tmp.full = dfixed_const(100); |
94 | 95 |             rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock)); |
95 | 96 |             rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
96 | 97 |             if (le32_to_cpu(info->info_v2.ulBootUpUMAClock)) |
97 | 98 |                 rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock)); |
98 | 99 |             else if (rdev->clock.default_mclk) |
99 | 100 |                 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
100 | 101 |             else |
101 | 102 |                 rdev->pm.igp_system_mclk.full = dfixed_const(66700); |
102 | 103 |             rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
103 | 104 |             rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq)); |
104 | 105 |             rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
105 | 106 |             rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
106 | 107 |             break; |
107 | 108 |         default: |
108 | 109 |             /* We assume the slowest possible clock, i.e. worst case */ |
109 | 110 |             rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
110 | 111 |             rdev->pm.igp_system_mclk.full = dfixed_const(200); |
111 | 112 |             rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
112 | 113 |             rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
113 | 114 |             DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
114 | 115 |             break; |
115 | 116 |         } |
116 | 117 |     } else { |
117 | 118 |         /* We assume the slowest possible clock, i.e. worst case */ |
118 | 119 |         rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
119 | 120 |         rdev->pm.igp_system_mclk.full = dfixed_const(200); |
120 | 121 |         rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
121 | 122 |         rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
122 | 123 |         DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
123 | 124 |     } |
124 | 125 |     /* Compute various bandwidths */ |
125 | 126 |     /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ |
126 | 127 |     tmp.full = dfixed_const(4); |
127 | 128 |     rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); |
128 | 129 |     /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 |
129 | 130 |      *              = ht_clk * ht_width / 5 |
130 | 131 |      */ |
131 | 132 |     tmp.full = dfixed_const(5); |
132 | 133 |     rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, |
133 | 134 |                                             rdev->pm.igp_ht_link_width); |
134 | 135 |     rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); |
135 | 136 |     if (tmp.full < rdev->pm.max_bandwidth.full) { |
136 | 137 |         /* HT link is a limiting factor */ |
137 | 138 |         rdev->pm.max_bandwidth.full = tmp.full; |
138 | 139 |     } |
139 | 140 |     /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 |
140 | 141 |      *                    = (sideport_clk * 14) / 10 |
141 | 142 |      */ |
142 | 143 |     tmp.full = dfixed_const(14); |
143 | 144 |     rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); |
144 | 145 |     tmp.full = dfixed_const(10); |
145 | 146 |     rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); |
146 | 147 | } |
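All of the clock and bandwidth math in rs690_pm_info() uses the DRM 20.12 fixed-point type: dfixed_const(A) stores A << 12 in .full, and dfixed_mul()/dfixed_div()/dfixed_trunc() operate on that representation (semantics assumed from include/drm/drm_fixed.h). A minimal worked sketch of the k8_bandwidth path, assuming a BIOS default_mclk of 40000 (radeon keeps clocks in 10 kHz units, so this is 400 MHz):

    static u32 rs690_k8_bandwidth_example(void)
    {
        fixed20_12 mclk, tmp, bw;

        tmp.full  = dfixed_const(100);
        mclk.full = dfixed_const(40000);
        mclk.full = dfixed_div(mclk, tmp);  /* 40000 / 100 = 400 (MHz) */
        tmp.full  = dfixed_const(4);
        bw.full   = dfixed_mul(mclk, tmp);  /* memory_clk * 4 = 1600 */
        return dfixed_trunc(bw);            /* 1600, per the comment's formula */
    }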
147 | 148 | ||
148 | 149 | static void rs690_mc_init(struct radeon_device *rdev) |
149 | 150 | { |
150 | 151 |     u64 base; |
151 | 152 |     uint32_t h_addr, l_addr; |
152 | 153 |     unsigned long long k8_addr; |
153 | 154 | ||
154 | 155 |     rs400_gart_adjust_size(rdev); |
155 | 156 |     rdev->mc.vram_is_ddr = true; |
156 | 157 |     rdev->mc.vram_width = 128; |
157 | 158 |     rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
158 | 159 |     rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
159 | 160 |     rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
160 | 161 |     rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
161 | 162 |     rdev->mc.visible_vram_size = rdev->mc.aper_size; |
162 | 163 |     base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
163 | 164 |     base = G_000100_MC_FB_START(base) << 16; |
164 | 165 |     rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
165 | 166 |     /* Some boards seem to be configured for 128MB of sideport memory, |
166 | 167 |      * but really only have 64MB. Just skip the sideport and use |
167 | 168 |      * UMA memory. |
168 | 169 |      */ |
169 | 170 |     if (rdev->mc.igp_sideport_enabled && |
170 | 171 |         (rdev->mc.real_vram_size == (384 * 1024 * 1024))) { |
171 | 172 |         base += 128 * 1024 * 1024; |
172 | 173 |         rdev->mc.real_vram_size -= 128 * 1024 * 1024; |
173 | 174 |         rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
174 | 175 |     } |
175 | 176 | ||
176 | 177 |     /* Use K8 direct mapping for fast fb access. */ |
177 | 178 |     rdev->fastfb_working = false; |
178 | 179 |     h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL)); |
179 | 180 |     l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION); |
180 | 181 |     k8_addr = ((unsigned long long)h_addr) << 32 | l_addr; |
181 | 182 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) |
182 | 183 |     if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL) |
183 | 184 | #endif |
184 | 185 |     { |
185 | 186 |         /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport |
186 | 187 |          * memory is present. |
187 | 188 |          */ |
188 | 189 |         if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) { |
189 | 190 |             DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", |
190 | 191 |                      (unsigned long long)rdev->mc.aper_base, k8_addr); |
191 | 192 |             rdev->mc.aper_base = (resource_size_t)k8_addr; |
192 | 193 |             rdev->fastfb_working = true; |
193 | 194 |         } |
194 | 195 |     } |
195 | 196 | ||
196 | 197 |     rs690_pm_info(rdev); |
197 | 198 |     radeon_vram_location(rdev, &rdev->mc, base); |
198 | 199 |     rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; |
199 | 200 |     radeon_gtt_location(rdev, &rdev->mc); |
200 | 201 |     radeon_update_bandwidth_info(rdev); |
201 | 202 | } |
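The K8 "fastfb" path splices the upper address-extension bits onto the 32-bit K8_FB_LOCATION value to form the CPU-physical address of the UMA carve-out, then substitutes it for the PCI aperture. A worked example with assumed register values:

    uint32_t h_addr = 0x1;          /* K8_ADDR_EXT (assumed: address bits above 32) */
    uint32_t l_addr = 0x40000000;   /* K8_FB_LOCATION: low 32 address bits */
    unsigned long long k8_addr = ((unsigned long long)h_addr << 32) | l_addr;
    /* k8_addr == 0x140000000, i.e. a carve-out at the 5 GB mark */

The #if guard exists because on CONFIG_X86_32 without PAE, resource_size_t is only 32 bits wide, so the remap is attempted only when the whole visible VRAM range fits below 4 GB.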
202 | 203 | ||
203 | 204 | void rs690_line_buffer_adjust(struct radeon_device *rdev, |
204 | 205 |                               struct drm_display_mode *mode1, |
205 | 206 |                               struct drm_display_mode *mode2) |
206 | 207 | { |
207 | 208 |     u32 tmp; |
- | 209 | ||
- | 210 |     /* Guess line buffer size to be 8192 pixels */ |
- | 211 |     u32 lb_size = 8192; |
208 | 212 | ||
209 | 213 |     /* |
210 | 214 |      * Line Buffer Setup |
211 | 215 |      * There is a single line buffer shared by both display controllers. |
212 | 216 |      * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between |
213 | 217 |      * the display controllers. The partitioning can either be done |
214 | 218 |      * manually or via one of four preset allocations specified in bits 1:0: |
215 | 219 |      * 0 - line buffer is divided in half and shared between crtc |
216 | 220 |      * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 |
217 | 221 |      * 2 - D1 gets the whole buffer |
218 | 222 |      * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 |
219 | 223 |      * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual |
220 | 224 |      * allocation mode. In manual allocation mode, D1 always starts at 0, |
221 | 225 |      * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. |
222 | 226 |      */ |
223 | 227 |     tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; |
224 | 228 |     tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; |
225 | 229 |     /* auto */ |
226 | 230 |     if (mode1 && mode2) { |
227 | 231 |         if (mode1->hdisplay > mode2->hdisplay) { |
228 | 232 |             if (mode1->hdisplay > 2560) |
229 | 233 |                 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; |
230 | 234 |             else |
231 | 235 |                 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
232 | 236 |         } else if (mode2->hdisplay > mode1->hdisplay) { |
233 | 237 |             if (mode2->hdisplay > 2560) |
234 | 238 |                 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
235 | 239 |             else |
236 | 240 |                 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
237 | 241 |         } else |
238 | 242 |             tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
239 | 243 |     } else if (mode1) { |
240 | 244 |         tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; |
241 | 245 |     } else if (mode2) { |
242 | 246 |         tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
243 | 247 |     } |
244 | 248 |     WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
- | 249 | ||
- | 250 |     /* Save number of lines the linebuffer leads before the scanout */ |
- | 251 |     if (mode1) |
- | 252 |         rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); |
- | 253 | ||
- | 254 |     if (mode2) |
- | 255 |         rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); |
245 | 256 | } |
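The lb_vblank_lead_lines bookkeeping added in Rev 6104 is simply the guessed line-buffer size divided by the mode width, rounded up:

    u32 lead = DIV_ROUND_UP(8192, 1920);  /* == 5 for a 1920-wide mode */

That is, with a 1920-pixel-wide mode the line buffer can run up to five scanlines ahead of the CRTC, which later vblank bookkeeping presumably has to account for.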
246 | 257 | ||
247 | 258 | struct rs690_watermark { |
248 | 259 |     u32 lb_request_fifo_depth; |
249 | 260 |     fixed20_12 num_line_pair; |
250 | 261 |     fixed20_12 estimated_width; |
251 | 262 |     fixed20_12 worst_case_latency; |
252 | 263 |     fixed20_12 consumption_rate; |
253 | 264 |     fixed20_12 active_time; |
254 | 265 |     fixed20_12 dbpp; |
255 | 266 |     fixed20_12 priority_mark_max; |
256 | 267 |     fixed20_12 priority_mark; |
257 | 268 |     fixed20_12 sclk; |
258 | 269 | }; |
259 | 270 | ||
260 | 271 | static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, |
261 | 272 |                                          struct radeon_crtc *crtc, |
262 | 273 |                                          struct rs690_watermark *wm, |
263 | 274 |                                          bool low) |
264 | 275 | { |
265 | 276 |     struct drm_display_mode *mode = &crtc->base.mode; |
266 | 277 |     fixed20_12 a, b, c; |
267 | 278 |     fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
268 | 279 |     fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
269 | 280 |     fixed20_12 sclk, core_bandwidth, max_bandwidth; |
270 | 281 |     u32 selected_sclk; |
271 | 282 | ||
272 | 283 |     if (!crtc->base.enabled) { |
273 | 284 |         /* FIXME: wouldn't it be better to set priority mark to maximum */ |
274 | 285 |         wm->lb_request_fifo_depth = 4; |
275 | 286 |         return; |
276 | 287 |     } |
277 | 288 | ||
278 | 289 |     if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) && |
279 | 290 |         (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) |
280 | 291 |         selected_sclk = radeon_dpm_get_sclk(rdev, low); |
281 | 292 |     else |
282 | 293 |         selected_sclk = rdev->pm.current_sclk; |
283 | 294 | ||
284 | 295 |     /* sclk in Mhz */ |
285 | 296 |     a.full = dfixed_const(100); |
286 | 297 |     sclk.full = dfixed_const(selected_sclk); |
287 | 298 |     sclk.full = dfixed_div(sclk, a); |
288 | 299 | ||
289 | 300 |     /* core_bandwidth = sclk(Mhz) * 16 */ |
290 | 301 |     a.full = dfixed_const(16); |
291 | 302 |     core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); |
292 | 303 | ||
293 | 304 |     if (crtc->vsc.full > dfixed_const(2)) |
294 | 305 |         wm->num_line_pair.full = dfixed_const(2); |
295 | 306 |     else |
296 | 307 |         wm->num_line_pair.full = dfixed_const(1); |
297 | 308 | ||
298 | 309 |     b.full = dfixed_const(mode->crtc_hdisplay); |
299 | 310 |     c.full = dfixed_const(256); |
300 | 311 |     a.full = dfixed_div(b, c); |
301 | 312 |     request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); |
302 | 313 |     request_fifo_depth.full = dfixed_ceil(request_fifo_depth); |
303 | 314 |     if (a.full < dfixed_const(4)) { |
304 | 315 |         wm->lb_request_fifo_depth = 4; |
305 | 316 |     } else { |
306 | 317 |         wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); |
307 | 318 |     } |
308 | 319 | ||
309 | 320 |     /* Determine consumption rate |
310 | 321 |      *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) |
311 | 322 |      *  vtaps = number of vertical taps, |
312 | 323 |      *  vsc = vertical scaling ratio, defined as source/destination |
313 | 324 |      *  hsc = horizontal scaling ratio, defined as source/destination |
314 | 325 |      */ |
315 | 326 |     a.full = dfixed_const(mode->clock); |
316 | 327 |     b.full = dfixed_const(1000); |
317 | 328 |     a.full = dfixed_div(a, b); |
318 | 329 |     pclk.full = dfixed_div(b, a); |
319 | 330 |     if (crtc->rmx_type != RMX_OFF) { |
320 | 331 |         b.full = dfixed_const(2); |
321 | 332 |         if (crtc->vsc.full > b.full) |
322 | 333 |             b.full = crtc->vsc.full; |
323 | 334 |         b.full = dfixed_mul(b, crtc->hsc); |
324 | 335 |         c.full = dfixed_const(2); |
325 | 336 |         b.full = dfixed_div(b, c); |
326 | 337 |         consumption_time.full = dfixed_div(pclk, b); |
327 | 338 |     } else { |
328 | 339 |         consumption_time.full = pclk.full; |
329 | 340 |     } |
330 | 341 |     a.full = dfixed_const(1); |
331 | 342 |     wm->consumption_rate.full = dfixed_div(a, consumption_time); |
332 | 343 | ||
333 | 344 | ||
334 | 345 |     /* Determine line time |
335 | 346 |      *  LineTime = total time for one line of display |
336 | 347 |      *  htotal = total number of horizontal pixels |
337 | 348 |      *  pclk = pixel clock period(ns) |
338 | 349 |      */ |
339 | 350 |     a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
340 | 351 |     line_time.full = dfixed_mul(a, pclk); |
341 | 352 | ||
342 | 353 |     /* Determine active time |
343 | 354 |      *  ActiveTime = time of active region of display within one line, |
344 | 355 |      *  hactive = total number of horizontal active pixels |
345 | 356 |      *  htotal = total number of horizontal pixels |
346 | 357 |      */ |
347 | 358 |     a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
348 | 359 |     b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
349 | 360 |     wm->active_time.full = dfixed_mul(line_time, b); |
350 | 361 |     wm->active_time.full = dfixed_div(wm->active_time, a); |
351 | 362 | ||
352 | 363 |     /* Maximum bandwidth is the minimum bandwidth of all components */ |
353 | 364 |     max_bandwidth = core_bandwidth; |
354 | 365 |     if (rdev->mc.igp_sideport_enabled) { |
355 | 366 |         if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
356 | 367 |             rdev->pm.sideport_bandwidth.full) |
357 | 368 |             max_bandwidth = rdev->pm.sideport_bandwidth; |
358 | 369 |         read_delay_latency.full = dfixed_const(370 * 800); |
359 | 370 |         a.full = dfixed_const(1000); |
360 | 371 |         b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a); |
361 | 372 |         read_delay_latency.full = dfixed_div(read_delay_latency, b); |
362 | 373 |         read_delay_latency.full = dfixed_mul(read_delay_latency, a); |
363 | 374 |     } else { |
364 | 375 |         if (max_bandwidth.full > rdev->pm.k8_bandwidth.full && |
365 | 376 |             rdev->pm.k8_bandwidth.full) |
366 | 377 |             max_bandwidth = rdev->pm.k8_bandwidth; |
367 | 378 |         if (max_bandwidth.full > rdev->pm.ht_bandwidth.full && |
368 | 379 |             rdev->pm.ht_bandwidth.full) |
369 | 380 |             max_bandwidth = rdev->pm.ht_bandwidth; |
370 | 381 |         read_delay_latency.full = dfixed_const(5000); |
371 | 382 |     } |
372 | 383 | ||
373 | 384 |     /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ |
374 | 385 |     a.full = dfixed_const(16); |
375 | 386 |     sclk.full = dfixed_mul(max_bandwidth, a); |
376 | 387 |     a.full = dfixed_const(1000); |
377 | 388 |     sclk.full = dfixed_div(a, sclk); |
378 | 389 |     /* Determine chunk time |
379 | 390 |      *  ChunkTime = the time it takes the DCP to send one chunk of data |
380 | 391 |      *  to the LB which consists of pipeline delay and inter chunk gap |
381 | 392 |      *  sclk = system clock(ns) |
382 | 393 |      */ |
383 | 394 |     a.full = dfixed_const(256 * 13); |
384 | 395 |     chunk_time.full = dfixed_mul(sclk, a); |
385 | 396 |     a.full = dfixed_const(10); |
386 | 397 |     chunk_time.full = dfixed_div(chunk_time, a); |
387 | 398 | ||
388 | 399 |     /* Determine the worst case latency |
389 | 400 |      *  NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
390 | 401 |      *  WorstCaseLatency = worst case time from urgent to when the MC starts |
391 | 402 |      *                     to return data |
392 | 403 |      *  READ_DELAY_IDLE_MAX = constant of 1us |
393 | 404 |      *  ChunkTime = time it takes the DCP to send one chunk of data to the LB |
394 | 405 |      *              which consists of pipeline delay and inter chunk gap |
395 | 406 |      */ |
396 | 407 |     if (dfixed_trunc(wm->num_line_pair) > 1) { |
397 | 408 |         a.full = dfixed_const(3); |
398 | 409 |         wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
399 | 410 |         wm->worst_case_latency.full += read_delay_latency.full; |
400 | 411 |     } else { |
401 | 412 |         a.full = dfixed_const(2); |
402 | 413 |         wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
403 | 414 |         wm->worst_case_latency.full += read_delay_latency.full; |
404 | 415 |     } |
405 | 416 | ||
406 | 417 |     /* Determine the tolerable latency |
407 | 418 |      *  TolerableLatency = Any given request has only 1 line time |
408 | 419 |      *                     for the data to be returned |
409 | 420 |      *  LBRequestFifoDepth = Number of chunk requests the LB can |
410 | 421 |      *                       put into the request FIFO for a display |
411 | 422 |      *  LineTime = total time for one line of display |
412 | 423 |      *  ChunkTime = the time it takes the DCP to send one chunk |
413 | 424 |      *              of data to the LB which consists of |
414 | 425 |      *              pipeline delay and inter chunk gap |
415 | 426 |      */ |
416 | 427 |     if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { |
417 | 428 |         tolerable_latency.full = line_time.full; |
418 | 429 |     } else { |
419 | 430 |         tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); |
420 | 431 |         tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
421 | 432 |         tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); |
422 | 433 |         tolerable_latency.full = line_time.full - tolerable_latency.full; |
423 | 434 |     } |
424 | 435 |     /* We assume worst case 32bits (4 bytes) */ |
425 | 436 |     wm->dbpp.full = dfixed_const(4 * 8); |
426 | 437 | ||
427 | 438 |     /* Determine the maximum priority mark |
428 | 439 |      *  width = viewport width in pixels |
429 | 440 |      */ |
430 | 441 |     a.full = dfixed_const(16); |
431 | 442 |     wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
432 | 443 |     wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); |
433 | 444 |     wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); |
434 | 445 | ||
435 | 446 |     /* Determine estimated width */ |
436 | 447 |     estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
437 | 448 |     estimated_width.full = dfixed_div(estimated_width, consumption_time); |
438 | 449 |     if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
439 | 450 |         wm->priority_mark.full = dfixed_const(10); |
440 | 451 |     } else { |
441 | 452 |         a.full = dfixed_const(16); |
442 | 453 |         wm->priority_mark.full = dfixed_div(estimated_width, a); |
443 | 454 |         wm->priority_mark.full = dfixed_ceil(wm->priority_mark); |
444 | 455 |         wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
445 | 456 |     } |
446 | 457 | } |
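To make the units concrete, a worked example with illustrative 1080p timings (not from the source): mode->clock = 148500 kHz, crtc_htotal = 2200, crtc_hdisplay = 1920, no scaling (rmx_type == RMX_OFF):

    /* pclk             = 1000 / (148500 / 1000) ~ 6.73 ns per pixel     */
    /* consumption_rate = 1 / 6.73               ~ 0.149 pixels per ns   */
    /* line_time        = 2200 * 6.73            ~ 14815 ns per scanline */
    /* active_time      = 14815 * 1920 / 2200    ~ 12929 ns              */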
447 | 458 | ||
448 | 459 | static void rs690_compute_mode_priority(struct radeon_device *rdev, |
449 | 460 |                                         struct rs690_watermark *wm0, |
450 | 461 |                                         struct rs690_watermark *wm1, |
451 | 462 |                                         struct drm_display_mode *mode0, |
452 | 463 |                                         struct drm_display_mode *mode1, |
453 | 464 |                                         u32 *d1mode_priority_a_cnt, |
454 | 465 |                                         u32 *d2mode_priority_a_cnt) |
455 | 466 | { |
456 | 467 |     fixed20_12 priority_mark02, priority_mark12, fill_rate; |
457 | 468 |     fixed20_12 a, b; |
458 | 469 | ||
459 | 470 |     *d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); |
460 | 471 |     *d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); |
461 | 472 | ||
462 | 473 |     if (mode0 && mode1) { |
463 | 474 |         if (dfixed_trunc(wm0->dbpp) > 64) |
464 | 475 |             a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair); |
465 | 476 |         else |
466 | 477 |             a.full = wm0->num_line_pair.full; |
467 | 478 |         if (dfixed_trunc(wm1->dbpp) > 64) |
468 | 479 |             b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair); |
469 | 480 |         else |
470 | 481 |             b.full = wm1->num_line_pair.full; |
471 | 482 |         a.full += b.full; |
472 | 483 |         fill_rate.full = dfixed_div(wm0->sclk, a); |
473 | 484 |         if (wm0->consumption_rate.full > fill_rate.full) { |
474 | 485 |             b.full = wm0->consumption_rate.full - fill_rate.full; |
475 | 486 |             b.full = dfixed_mul(b, wm0->active_time); |
476 | 487 |             a.full = dfixed_mul(wm0->worst_case_latency, |
477 | 488 |                                 wm0->consumption_rate); |
478 | 489 |             a.full = a.full + b.full; |
479 | 490 |             b.full = dfixed_const(16 * 1000); |
480 | 491 |             priority_mark02.full = dfixed_div(a, b); |
481 | 492 |         } else { |
482 | 493 |             a.full = dfixed_mul(wm0->worst_case_latency, |
483 | 494 |                                 wm0->consumption_rate); |
484 | 495 |             b.full = dfixed_const(16 * 1000); |
485 | 496 |             priority_mark02.full = dfixed_div(a, b); |
486 | 497 |         } |
487 | 498 |         if (wm1->consumption_rate.full > fill_rate.full) { |
488 | 499 |             b.full = wm1->consumption_rate.full - fill_rate.full; |
489 | 500 |             b.full = dfixed_mul(b, wm1->active_time); |
490 | 501 |             a.full = dfixed_mul(wm1->worst_case_latency, |
491 | 502 |                                 wm1->consumption_rate); |
492 | 503 |             a.full = a.full + b.full; |
493 | 504 |             b.full = dfixed_const(16 * 1000); |
494 | 505 |             priority_mark12.full = dfixed_div(a, b); |
495 | 506 |         } else { |
496 | 507 |             a.full = dfixed_mul(wm1->worst_case_latency, |
497 | 508 |                                 wm1->consumption_rate); |
498 | 509 |             b.full = dfixed_const(16 * 1000); |
499 | 510 |             priority_mark12.full = dfixed_div(a, b); |
500 | 511 |         } |
501 | 512 |         if (wm0->priority_mark.full > priority_mark02.full) |
502 | 513 |             priority_mark02.full = wm0->priority_mark.full; |
503 | 514 |         if (wm0->priority_mark_max.full > priority_mark02.full) |
504 | 515 |             priority_mark02.full = wm0->priority_mark_max.full; |
505 | 516 |         if (wm1->priority_mark.full > priority_mark12.full) |
506 | 517 |             priority_mark12.full = wm1->priority_mark.full; |
507 | 518 |         if (wm1->priority_mark_max.full > priority_mark12.full) |
508 | 519 |             priority_mark12.full = wm1->priority_mark_max.full; |
509 | 520 |         *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
510 | 521 |         *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
511 | 522 |         if (rdev->disp_priority == 2) { |
512 | 523 |             *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
513 | 524 |             *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); |
514 | 525 |         } |
515 | 526 |     } else if (mode0) { |
516 | 527 |         if (dfixed_trunc(wm0->dbpp) > 64) |
517 | 528 |             a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair); |
518 | 529 |         else |
519 | 530 |             a.full = wm0->num_line_pair.full; |
520 | 531 |         fill_rate.full = dfixed_div(wm0->sclk, a); |
521 | 532 |         if (wm0->consumption_rate.full > fill_rate.full) { |
522 | 533 |             b.full = wm0->consumption_rate.full - fill_rate.full; |
523 | 534 |             b.full = dfixed_mul(b, wm0->active_time); |
524 | 535 |             a.full = dfixed_mul(wm0->worst_case_latency, |
525 | 536 |                                 wm0->consumption_rate); |
526 | 537 |             a.full = a.full + b.full; |
527 | 538 |             b.full = dfixed_const(16 * 1000); |
528 | 539 |             priority_mark02.full = dfixed_div(a, b); |
529 | 540 |         } else { |
530 | 541 |             a.full = dfixed_mul(wm0->worst_case_latency, |
531 | 542 |                                 wm0->consumption_rate); |
532 | 543 |             b.full = dfixed_const(16 * 1000); |
533 | 544 |             priority_mark02.full = dfixed_div(a, b); |
534 | 545 |         } |
535 | 546 |         if (wm0->priority_mark.full > priority_mark02.full) |
536 | 547 |             priority_mark02.full = wm0->priority_mark.full; |
537 | 548 |         if (wm0->priority_mark_max.full > priority_mark02.full) |
538 | 549 |             priority_mark02.full = wm0->priority_mark_max.full; |
539 | 550 |         *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
540 | 551 |         if (rdev->disp_priority == 2) |
541 | 552 |             *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
542 | 553 |     } else if (mode1) { |
543 | 554 |         if (dfixed_trunc(wm1->dbpp) > 64) |
544 | 555 |             a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair); |
545 | 556 |         else |
546 | 557 |             a.full = wm1->num_line_pair.full; |
547 | 558 |         fill_rate.full = dfixed_div(wm1->sclk, a); |
548 | 559 |         if (wm1->consumption_rate.full > fill_rate.full) { |
549 | 560 |             b.full = wm1->consumption_rate.full - fill_rate.full; |
550 | 561 |             b.full = dfixed_mul(b, wm1->active_time); |
551 | 562 |             a.full = dfixed_mul(wm1->worst_case_latency, |
552 | 563 |                                 wm1->consumption_rate); |
553 | 564 |             a.full = a.full + b.full; |
554 | 565 |             b.full = dfixed_const(16 * 1000); |
555 | 566 |             priority_mark12.full = dfixed_div(a, b); |
556 | 567 |         } else { |
557 | 568 |             a.full = dfixed_mul(wm1->worst_case_latency, |
558 | 569 |                                 wm1->consumption_rate); |
559 | 570 |             b.full = dfixed_const(16 * 1000); |
560 | 571 |             priority_mark12.full = dfixed_div(a, b); |
561 | 572 |         } |
562 | 573 |         if (wm1->priority_mark.full > priority_mark12.full) |
563 | 574 |             priority_mark12.full = wm1->priority_mark.full; |
564 | 575 |         if (wm1->priority_mark_max.full > priority_mark12.full) |
565 | 576 |             priority_mark12.full = wm1->priority_mark_max.full; |
566 | 577 |         *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
567 | 578 |         if (rdev->disp_priority == 2) |
568 | 579 |             *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); |
569 | 580 |     } |
570 | 581 | } |
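The three branches above (both CRTCs, D1 only, D2 only) repeat the same per-watermark computation with different inputs. A sketch of the shared step as a hypothetical helper (not part of the driver), using the same dfixed helpers:

    static u32 rs690_priority_mark(struct rs690_watermark *wm, fixed20_12 fill_rate)
    {
        fixed20_12 a, b, mark;

        a.full = dfixed_mul(wm->worst_case_latency, wm->consumption_rate);
        if (wm->consumption_rate.full > fill_rate.full) {
            /* display consumes faster than memory can fill: add the deficit */
            b.full = wm->consumption_rate.full - fill_rate.full;
            b.full = dfixed_mul(b, wm->active_time);
            a.full += b.full;
        }
        b.full = dfixed_const(16 * 1000);
        mark.full = dfixed_div(a, b);
        if (wm->priority_mark.full > mark.full)
            mark.full = wm->priority_mark.full;
        if (wm->priority_mark_max.full > mark.full)
            mark.full = wm->priority_mark_max.full;
        return dfixed_trunc(mark);
    }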
571 | 582 | ||
572 | 583 | void rs690_bandwidth_update(struct radeon_device *rdev) |
573 | 584 | { |
574 | 585 |     struct drm_display_mode *mode0 = NULL; |
575 | 586 |     struct drm_display_mode *mode1 = NULL; |
576 | 587 |     struct rs690_watermark wm0_high, wm0_low; |
577 | 588 |     struct rs690_watermark wm1_high, wm1_low; |
578 | 589 |     u32 tmp; |
579 | 590 |     u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; |
580 | 591 |     u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; |
581 | 592 | ||
582 | 593 |     if (!rdev->mode_info.mode_config_initialized) |
583 | 594 |         return; |
584 | 595 | ||
585 | 596 |     radeon_update_display_priority(rdev); |
586 | 597 | ||
587 | 598 |     if (rdev->mode_info.crtcs[0]->base.enabled) |
588 | 599 |         mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
589 | 600 |     if (rdev->mode_info.crtcs[1]->base.enabled) |
590 | 601 |         mode1 = &rdev->mode_info.crtcs[1]->base.mode; |
591 | 602 |     /* |
592 | 603 |      * Set display0/1 priority up in the memory controller for |
593 | 604 |      * modes if the user specifies HIGH for displaypriority |
594 | 605 |      * option. |
595 | 606 |      */ |
596 | 607 |     if ((rdev->disp_priority == 2) && |
597 | 608 |         ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) { |
598 | 609 |         tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); |
599 | 610 |         tmp &= C_000104_MC_DISP0R_INIT_LAT; |
600 | 611 |         tmp &= C_000104_MC_DISP1R_INIT_LAT; |
601 | 612 |         if (mode0) |
602 | 613 |             tmp |= S_000104_MC_DISP0R_INIT_LAT(1); |
603 | 614 |         if (mode1) |
604 | 615 |             tmp |= S_000104_MC_DISP1R_INIT_LAT(1); |
605 | 616 |         WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); |
606 | 617 |     } |
607 | 618 |     rs690_line_buffer_adjust(rdev, mode0, mode1); |
608 | 619 | ||
609 | 620 |     if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) |
610 | 621 |         WREG32(R_006C9C_DCP_CONTROL, 0); |
611 | 622 |     if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
612 | 623 |         WREG32(R_006C9C_DCP_CONTROL, 2); |
613 | 624 | ||
614 | 625 |     rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false); |
615 | 626 |     rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false); |
616 | 627 | ||
617 | 628 |     rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true); |
618 | 629 |     rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true); |
619 | 630 | ||
620 | 631 |     tmp = (wm0_high.lb_request_fifo_depth - 1); |
621 | 632 |     tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16; |
622 | 633 |     WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); |
623 | 634 | ||
624 | 635 |     rs690_compute_mode_priority(rdev, |
625 | 636 |                                 &wm0_high, &wm1_high, |
626 | 637 |                                 mode0, mode1, |
627 | 638 |                                 &d1mode_priority_a_cnt, &d2mode_priority_a_cnt); |
628 | 639 |     rs690_compute_mode_priority(rdev, |
629 | 640 |                                 &wm0_low, &wm1_low, |
630 | 641 |                                 mode0, mode1, |
631 | 642 |                                 &d1mode_priority_b_cnt, &d2mode_priority_b_cnt); |
632 | 643 | ||
633 | 644 |     WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); |
634 | 645 |     WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt); |
635 | 646 |     WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
636 | 647 |     WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt); |
637 | 648 | } |
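Note the pairing in the register writes above: the watermarks computed with low == false feed the PRIORITY_A_CNT registers and the low == true ones feed PRIORITY_B_CNT, presumably so the memory controller has a valid urgency threshold at either engine clock the power-management code may select:

    /* wm0_high, wm1_high -> D1/D2MODE_PRIORITY_A_CNT (high sclk) */
    /* wm0_low,  wm1_low  -> D1/D2MODE_PRIORITY_B_CNT (low sclk)  */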
638 | 649 | ||
639 | 650 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
640 | 651 | { |
641 | 652 |     unsigned long flags; |
642 | 653 |     uint32_t r; |
643 | 654 | ||
644 | 655 |     spin_lock_irqsave(&rdev->mc_idx_lock, flags); |
645 | 656 |     WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); |
646 | 657 |     r = RREG32(R_00007C_MC_DATA); |
647 | 658 |     WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); |
648 | 659 |     spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); |
649 | 660 |     return r; |
650 | 661 | } |
651 | 662 | ||
652 | 663 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
653 | 664 | { |
654 | 665 |     unsigned long flags; |
655 | 666 | ||
656 | 667 |     spin_lock_irqsave(&rdev->mc_idx_lock, flags); |
657 | 668 |     WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | |
658 | 669 |            S_000078_MC_IND_WR_EN(1)); |
659 | 670 |     WREG32(R_00007C_MC_DATA, v); |
660 | 671 |     WREG32(R_000078_MC_INDEX, 0x7F); |
661 | 672 |     spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); |
662 | 673 | } |
663 | 674 | ||
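rs690_mc_rreg() and rs690_mc_wreg() implement an indirect register window: the target offset is written to MC_INDEX (with a write-enable bit for stores), the payload moves through MC_DATA, and mc_idx_lock keeps the two-step sequence atomic against concurrent accessors. A minimal read-modify-write sketch built only on these helpers; the helper name is hypothetical, while the register offset is the one used in the watermark code earlier in this file:

/* Hypothetical helper: OR latency bits into an indirectly mapped MC register. */
static void rs690_mc_set_lat_bits(struct radeon_device *rdev, uint32_t bits)
{
	uint32_t tmp = rs690_mc_rreg(rdev, R_000104_MC_INIT_MISC_LAT_TIMER);
	rs690_mc_wreg(rdev, R_000104_MC_INIT_MISC_LAT_TIMER, tmp | bits);
}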
664 | static void rs690_mc_program(struct radeon_device *rdev) | 675 | static void rs690_mc_program(struct radeon_device *rdev) |
665 | { | 676 | { |
666 | struct rv515_mc_save save; | 677 | struct rv515_mc_save save; |
667 | 678 | ||
668 | /* Stops all mc clients */ | 679 | /* Stops all mc clients */ |
669 | rv515_mc_stop(rdev, &save); | 680 | rv515_mc_stop(rdev, &save); |
670 | 681 | ||
671 | /* Wait for mc idle */ | 682 | /* Wait for mc idle */ |
672 | if (rs690_mc_wait_for_idle(rdev)) | 683 | if (rs690_mc_wait_for_idle(rdev)) |
673 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | 684 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
674 | /* Program MC, should be a 32-bit limited address space */ | 685 | /* Program MC, should be a 32-bit limited address space */ |
675 | WREG32_MC(R_000100_MCCFG_FB_LOCATION, | 686 | WREG32_MC(R_000100_MCCFG_FB_LOCATION, |
676 | S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | | 687 | S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | |
677 | S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 688 | S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
678 | WREG32(R_000134_HDP_FB_LOCATION, | 689 | WREG32(R_000134_HDP_FB_LOCATION, |
679 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | 690 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); |
680 | 691 | ||
681 | rv515_mc_resume(rdev, &save); | 692 | rv515_mc_resume(rdev, &save); |
682 | } | 693 | } |
683 | 694 | ||
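rs690_mc_program() places the VRAM aperture in 64 KiB granules: both bounds are shifted right by 16 before being packed into MCCFG_FB_LOCATION. A worked example, assuming the conventional rs690d.h field layout (FB_START in bits 15:0, FB_TOP in bits 31:16) and an illustrative 64 MiB aperture based at 0:

/* start = 0x00000000, end = 0x03FFFFFF (64 MiB):
 * FB_START = 0x0000, FB_TOP = 0x03FF -> MCCFG_FB_LOCATION = 0x03FF0000 */
uint32_t fb_loc = S_000100_MC_FB_START(0x00000000 >> 16) |
		  S_000100_MC_FB_TOP(0x03FFFFFF >> 16);
WREG32_MC(R_000100_MCCFG_FB_LOCATION, fb_loc);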
684 | static int rs690_startup(struct radeon_device *rdev) | 695 | static int rs690_startup(struct radeon_device *rdev) |
685 | { | 696 | { |
686 | int r; | 697 | int r; |
687 | 698 | ||
688 | rs690_mc_program(rdev); | 699 | rs690_mc_program(rdev); |
689 | /* Resume clock */ | 700 | /* Resume clock */ |
690 | rv515_clock_startup(rdev); | 701 | rv515_clock_startup(rdev); |
691 | /* Initialize GPU configuration (# pipes, ...) */ | 702 | /* Initialize GPU configuration (# pipes, ...) */ |
692 | rs690_gpu_init(rdev); | 703 | rs690_gpu_init(rdev); |
693 | /* Initialize GART (initialize after TTM so we can allocate | 704 | /* Initialize GART (initialize after TTM so we can allocate |
694 | * memory through TTM but finalize after TTM) */ | 705 | * memory through TTM but finalize after TTM) */ |
695 | r = rs400_gart_enable(rdev); | 706 | r = rs400_gart_enable(rdev); |
696 | if (r) | 707 | if (r) |
697 | return r; | 708 | return r; |
698 | 709 | ||
699 | /* allocate wb buffer */ | 710 | /* allocate wb buffer */ |
700 | r = radeon_wb_init(rdev); | 711 | r = radeon_wb_init(rdev); |
701 | if (r) | 712 | if (r) |
702 | return r; | 713 | return r; |
703 | 714 | ||
704 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); | 715 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
705 | if (r) { | 716 | if (r) { |
706 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); | 717 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
707 | return r; | 718 | return r; |
708 | } | 719 | } |
709 | 720 | ||
710 | /* Enable IRQ */ | 721 | /* Enable IRQ */ |
711 | if (!rdev->irq.installed) { | 722 | if (!rdev->irq.installed) { |
712 | r = radeon_irq_kms_init(rdev); | 723 | r = radeon_irq_kms_init(rdev); |
713 | if (r) | 724 | if (r) |
714 | return r; | 725 | return r; |
715 | } | 726 | } |
716 | 727 | ||
717 | rs600_irq_set(rdev); | 728 | rs600_irq_set(rdev); |
718 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 729 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
719 | /* 1M ring buffer */ | 730 | /* 1M ring buffer */ |
720 | r = r100_cp_init(rdev, 1024 * 1024); | 731 | r = r100_cp_init(rdev, 1024 * 1024); |
721 | if (r) { | 732 | if (r) { |
722 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 733 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
723 | return r; | 734 | return r; |
724 | } | 735 | } |
725 | 736 | ||
726 | r = radeon_ib_pool_init(rdev); | 737 | r = radeon_ib_pool_init(rdev); |
727 | if (r) { | 738 | if (r) { |
728 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | 739 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
729 | return r; | 740 | return r; |
730 | } | 741 | } |
731 | 742 | ||
732 | r = r600_audio_init(rdev); | 743 | r = radeon_audio_init(rdev); |
733 | if (r) { | 744 | if (r) { |
734 | dev_err(rdev->dev, "failed initializing audio\n"); | 745 | dev_err(rdev->dev, "failed initializing audio\n"); |
735 | return r; | 746 | return r; |
736 | } | 747 | } |
737 | 748 | ||
738 | return 0; | 749 | return 0; |
739 | } | 750 | } |
740 | 751 | ||
741 | 752 | ||
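rs690_startup() brings the ASIC up in strict dependency order: MC aperture, clocks, GPU config, GART, writeback, fences, IRQs, CP ring, IB pool, then audio, returning at the first failing step. A resume path would replay this same bring-up after re-posting the chip; upstream has an rs690_resume() along these lines, but it is not part of this diff, so the body below is only an illustrative sketch:

int rs690_resume_sketch(struct radeon_device *rdev)
{
	/* Reset, re-post via ATOM, restart clocks, then replay startup. */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_clock_startup(rdev);
	rdev->accel_working = true;
	return rs690_startup(rdev);
}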
- | 753 | ||
- | 754 | void rs690_fini(struct radeon_device *rdev) |
- | 755 | { |
- | 756 | radeon_pm_fini(rdev); |
- | 757 | radeon_audio_fini(rdev); |
- | 758 | r100_cp_fini(rdev); |
- | 759 | radeon_wb_fini(rdev); |
- | 760 | radeon_ib_pool_fini(rdev); |
- | 761 | radeon_gem_fini(rdev); |
- | 762 | rs400_gart_fini(rdev); |
- | 763 | radeon_irq_kms_fini(rdev); |
- | 764 | radeon_fence_driver_fini(rdev); |
- | 765 | radeon_bo_fini(rdev); |
- | 766 | radeon_atombios_fini(rdev); |
- | 767 | kfree(rdev->bios); |
- | 768 | rdev->bios = NULL; |
742 | 769 | } |
743 | 770 | ||
744 | int rs690_init(struct radeon_device *rdev) | 771 | int rs690_init(struct radeon_device *rdev) |
745 | { | 772 | { |
746 | int r; | 773 | int r; |
747 | 774 | ||
748 | /* Disable VGA */ | 775 | /* Disable VGA */ |
749 | rv515_vga_render_disable(rdev); | 776 | rv515_vga_render_disable(rdev); |
750 | /* Initialize scratch registers */ | 777 | /* Initialize scratch registers */ |
751 | radeon_scratch_init(rdev); | 778 | radeon_scratch_init(rdev); |
752 | /* Initialize surface registers */ | 779 | /* Initialize surface registers */ |
753 | radeon_surface_init(rdev); | 780 | radeon_surface_init(rdev); |
754 | /* restore some registers to sane defaults */ | 781 | /* restore some registers to sane defaults */ |
755 | r100_restore_sanity(rdev); | 782 | r100_restore_sanity(rdev); |
756 | /* TODO: disabling VGA needs to use a VGA request */ | 783 | /* TODO: disabling VGA needs to use a VGA request */ |
757 | /* BIOS */ | 784 | /* BIOS */ |
758 | if (!radeon_get_bios(rdev)) { | 785 | if (!radeon_get_bios(rdev)) { |
759 | if (ASIC_IS_AVIVO(rdev)) | 786 | if (ASIC_IS_AVIVO(rdev)) |
760 | return -EINVAL; | 787 | return -EINVAL; |
761 | } | 788 | } |
762 | if (rdev->is_atom_bios) { | 789 | if (rdev->is_atom_bios) { |
763 | r = radeon_atombios_init(rdev); | 790 | r = radeon_atombios_init(rdev); |
764 | if (r) | 791 | if (r) |
765 | return r; | 792 | return r; |
766 | } else { | 793 | } else { |
767 | dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); | 794 | dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); |
768 | return -EINVAL; | 795 | return -EINVAL; |
769 | } | 796 | } |
770 | /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */ | 797 | /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */ |
771 | if (radeon_asic_reset(rdev)) { | 798 | if (radeon_asic_reset(rdev)) { |
772 | dev_warn(rdev->dev, | 799 | dev_warn(rdev->dev, |
773 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | 800 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
774 | RREG32(R_000E40_RBBM_STATUS), | 801 | RREG32(R_000E40_RBBM_STATUS), |
775 | RREG32(R_0007C0_CP_STAT)); | 802 | RREG32(R_0007C0_CP_STAT)); |
776 | } | 803 | } |
777 | /* check if the card is posted or not */ | 804 | /* check if the card is posted or not */ |
778 | if (radeon_boot_test_post_card(rdev) == false) | 805 | if (radeon_boot_test_post_card(rdev) == false) |
779 | return -EINVAL; | 806 | return -EINVAL; |
780 | 807 | ||
781 | /* Initialize clocks */ | 808 | /* Initialize clocks */ |
782 | radeon_get_clock_info(rdev->ddev); | 809 | radeon_get_clock_info(rdev->ddev); |
783 | /* initialize memory controller */ | 810 | /* initialize memory controller */ |
784 | rs690_mc_init(rdev); | 811 | rs690_mc_init(rdev); |
785 | rv515_debugfs(rdev); | 812 | rv515_debugfs(rdev); |
786 | /* Fence driver */ | 813 | /* Fence driver */ |
787 | r = radeon_fence_driver_init(rdev); | 814 | r = radeon_fence_driver_init(rdev); |
788 | if (r) | 815 | if (r) |
789 | return r; | 816 | return r; |
790 | /* Memory manager */ | 817 | /* Memory manager */ |
791 | r = radeon_bo_init(rdev); | 818 | r = radeon_bo_init(rdev); |
792 | if (r) | 819 | if (r) |
793 | return r; | 820 | return r; |
794 | r = rs400_gart_init(rdev); | 821 | r = rs400_gart_init(rdev); |
795 | if (r) | 822 | if (r) |
796 | return r; | 823 | return r; |
797 | rs600_set_safe_registers(rdev); | 824 | rs600_set_safe_registers(rdev); |
798 | 825 | ||
799 | /* Initialize power management */ | 826 | /* Initialize power management */ |
800 | radeon_pm_init(rdev); | 827 | radeon_pm_init(rdev); |
801 | 828 | ||
802 | rdev->accel_working = true; | 829 | rdev->accel_working = true; |
803 | r = rs690_startup(rdev); | 830 | r = rs690_startup(rdev); |
804 | if (r) { | 831 | if (r) { |
805 | /* Something went wrong with the accel init; stop acceleration */ | 832 | /* Something went wrong with the accel init; stop acceleration */ |
806 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 833 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
807 | // r100_cp_fini(rdev); | 834 | r100_cp_fini(rdev); |
808 | // r100_wb_fini(rdev); | 835 | radeon_wb_fini(rdev); |
809 | // r100_ib_fini(rdev); | 836 | radeon_ib_pool_fini(rdev); |
810 | rs400_gart_fini(rdev); | 837 | rs400_gart_fini(rdev); |
811 | // radeon_irq_kms_fini(rdev); | 838 | radeon_irq_kms_fini(rdev); |
812 | rdev->accel_working = false; | 839 | rdev->accel_working = false; |
813 | } | 840 | } |
814 | return 0; | 841 | return 0; |
815 | } | 842 | } |
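Note the asymmetric error handling at the end of rs690_init(): if rs690_startup() fails, the acceleration paths are torn back down and rdev->accel_working is cleared, yet the function still returns 0, so the KMS side can come up with acceleration disabled rather than failing the whole probe.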