Rev 1120 | Rev 1128 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1120 | Rev 1125 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
14 | * all copies or substantial portions of the Software. |
15 | * |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
23 | * |
24 | * Authors: Dave Airlie |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
25 | * Alex Deucher |
26 | * Jerome Glisse |
26 | * Jerome Glisse |
27 | */ |
27 | */ |
28 | //#include "drmP.h" |
28 | #include "drmP.h" |
29 | #include "radeon_reg.h" |
29 | #include "radeon_reg.h" |
30 | #include "radeon.h" |
30 | #include "radeon.h" |
31 | 31 | ||
32 | /* r520,rv530,rv560,rv570,r580 depends on : */ |
32 | /* r520,rv530,rv560,rv570,r580 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); |
33 | void r100_hdp_reset(struct radeon_device *rdev); |
34 | int rv370_pcie_gart_enable(struct radeon_device *rdev); |
34 | int rv370_pcie_gart_enable(struct radeon_device *rdev); |
35 | void rv370_pcie_gart_disable(struct radeon_device *rdev); |
35 | void rv370_pcie_gart_disable(struct radeon_device *rdev); |
36 | void r420_pipes_init(struct radeon_device *rdev); |
36 | void r420_pipes_init(struct radeon_device *rdev); |
37 | void rs600_mc_disable_clients(struct radeon_device *rdev); |
37 | void rs600_mc_disable_clients(struct radeon_device *rdev); |
38 | void rs600_disable_vga(struct radeon_device *rdev); |
38 | void rs600_disable_vga(struct radeon_device *rdev); |
39 | int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); |
39 | int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); |
40 | int rv515_debugfs_ga_info_init(struct radeon_device *rdev); |
40 | int rv515_debugfs_ga_info_init(struct radeon_device *rdev); |
41 | 41 | ||
42 | /* This files gather functions specifics to: |
42 | /* This files gather functions specifics to: |
43 | * r520,rv530,rv560,rv570,r580 |
43 | * r520,rv530,rv560,rv570,r580 |
44 | * |
44 | * |
45 | * Some of these functions might be used by newer ASICs. |
45 | * Some of these functions might be used by newer ASICs. |
46 | */ |
46 | */ |
47 | void r520_gpu_init(struct radeon_device *rdev); |
47 | void r520_gpu_init(struct radeon_device *rdev); |
48 | int r520_mc_wait_for_idle(struct radeon_device *rdev); |
48 | int r520_mc_wait_for_idle(struct radeon_device *rdev); |
49 | 49 | ||
50 | /* |
50 | /* |
51 | * MC |
51 | * MC |
52 | */ |
52 | */ |
/*
 * Initialize the memory controller: bring up the GPU engines, lay out
 * the VRAM and GTT apertures (AGP-backed when available), and program
 * them into the MC block.
 *
 * Returns 0 on success, or the negative error from radeon_mc_setup().
 */
int r520_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	dbgprintf("%s\n",__FUNCTION__);

//	if (r100_debugfs_rbbm_init(rdev)) {
//		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
//	}
//	if (rv515_debugfs_pipes_info_init(rdev)) {
//		DRM_ERROR("Failed to register debugfs file for pipes !\n");
//	}
//	if (rv515_debugfs_ga_info_init(rdev)) {
//		DRM_ERROR("Failed to register debugfs file for pipes !\n");
//	}

	r520_gpu_init(rdev);
	rv370_pcie_gart_disable(rdev);

	/* Setup GPU memory space.  The all-ones values are "no placement
	 * chosen yet" sentinels; radeon_mc_setup() picks real addresses. */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			/* AGP bring-up failed: fall back to a PCI(E) GART
			 * of the module-requested size (MiB -> bytes). */
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	/* FB aperture: start/top fields take addresses shifted down 16 bits. */
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(R520_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	/* NOTE(review): 0x310 is also written by rs600_disable_vga();
	 * presumably the VGA memory base register — confirm vs. docs. */
	WREG32(0x310, rdev->mc.vram_location);
	if (rdev->flags & RADEON_IS_AGP) {
		/* AGP aperture, same 16-bit-shifted granularity. */
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32_MC(R520_MC_AGP_LOCATION, tmp);
		WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
		WREG32_MC(R520_MC_AGP_BASE_2, 0);
	} else {
		/* No AGP: park the aperture out of the way and zero bases. */
		WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32_MC(R520_MC_AGP_BASE, 0);
		WREG32_MC(R520_MC_AGP_BASE_2, 0);
	}

	dbgprintf("done: %s\n",__FUNCTION__);

	return 0;
}
122 | 122 | ||
/*
 * Tear down the memory controller: disable the PCIE GART, then free
 * the GART table VRAM object and its bookkeeping.
 */
void r520_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
129 | 129 | ||
130 | 130 | ||
131 | /* |
131 | /* |
132 | * Global GPU functions |
132 | * Global GPU functions |
133 | */ |
133 | */ |
/*
 * Apply chip-specific PLL errata flags; the r520 family needs none,
 * so the errata word is simply cleared.
 */
void r520_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}
138 | 138 | ||
139 | int r520_mc_wait_for_idle(struct radeon_device *rdev) |
139 | int r520_mc_wait_for_idle(struct radeon_device *rdev) |
140 | { |
140 | { |
141 | unsigned i; |
141 | unsigned i; |
142 | uint32_t tmp; |
142 | uint32_t tmp; |
143 | 143 | ||
144 | for (i = 0; i < rdev->usec_timeout; i++) { |
144 | for (i = 0; i < rdev->usec_timeout; i++) { |
145 | /* read MC_STATUS */ |
145 | /* read MC_STATUS */ |
146 | tmp = RREG32_MC(R520_MC_STATUS); |
146 | tmp = RREG32_MC(R520_MC_STATUS); |
147 | if (tmp & R520_MC_STATUS_IDLE) { |
147 | if (tmp & R520_MC_STATUS_IDLE) { |
148 | return 0; |
148 | return 0; |
149 | } |
149 | } |
150 | DRM_UDELAY(1); |
150 | DRM_UDELAY(1); |
151 | } |
151 | } |
152 | return -1; |
152 | return -1; |
153 | } |
153 | } |
154 | 154 | ||
/*
 * One-time GPU engine setup: reset the HDP block, disable legacy VGA,
 * apply the RV530 FIFO workaround, initialize the raster pipes, and
 * mirror the pipe configuration into the PLL block.
 */
void r520_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;
	dbgprintf("%s\n",__FUNCTION__);

	r100_hdp_reset(rdev);
	rs600_disable_vga(rdev);
	/*
	 * Register map used below (raw offsets):
	 * DST_PIPE_CONFIG		0x170C
	 * GB_TILE_CONFIG		0x4018
	 * GB_FIFO_SIZE			0x4024
	 * GB_PIPE_SELECT		0x402C
	 * GB_PIPE_SELECT2		0x4124
	 *	Z_PIPE_SHIFT		0
	 *	Z_PIPE_MASK		0x000000003
	 * GB_FIFO_SIZE2		0x4128
	 *	SC_SFIFO_SIZE_SHIFT	0
	 *	SC_SFIFO_SIZE_MASK	0x000000003
	 *	SC_MFIFO_SIZE_SHIFT	2
	 *	SC_MFIFO_SIZE_MASK	0x00000000C
	 *	FG_SFIFO_SIZE_SHIFT	4
	 *	FG_SFIFO_SIZE_MASK	0x000000030
	 *	ZB_MFIFO_SIZE_SHIFT	6
	 *	ZB_MFIFO_SIZE_MASK	0x0000000C0
	 * GA_ENHANCE			0x4274
	 * SU_REG_DEST			0x42C8
	 */
	/* workaround for RV530 */
	if (rdev->family == CHIP_RV530) {
		WREG32(0x4124, 1);	/* GB_PIPE_SELECT2 */
		WREG32(0x4128, 0xFF);	/* GB_FIFO_SIZE2 */
	}
	r420_pipes_init(rdev);
	/* Build a PLL-side pipe word: low nibble = bit for the current
	 * DST pipe, high nibble = GB pipe mask from GB_PIPE_SELECT.
	 * NOTE(review): PLL register 0x000D semantics taken on faith
	 * from the upstream driver — confirm against register docs. */
	gb_pipe_select = RREG32(0x402C);	/* GB_PIPE_SELECT */
	tmp = RREG32(0x170C);			/* DST_PIPE_CONFIG */
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
199 | 199 | ||
200 | 200 | ||
201 | /* |
201 | /* |
202 | * VRAM info |
202 | * VRAM info |
203 | */ |
203 | */ |
/*
 * Derive the VRAM bus width from MC_CNTL0: the channel-count field
 * maps 0/1/2/3 -> 32/64/128/256 bits, doubled again when the
 * channel-size bit is set.  Also marks the memory as DDR.
 */
static void r520_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;
	dbgprintf("%s\n",__FUNCTION__);

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(R520_MC_CNTL0);
	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
	case 0:
		rdev->mc.vram_width = 32;
		break;
	case 1:
		rdev->mc.vram_width = 64;
		break;
	case 2:
		rdev->mc.vram_width = 128;
		break;
	case 3:
		rdev->mc.vram_width = 256;
		break;
	default:
		/* Unexpected field value: keep the 128-bit default. */
		rdev->mc.vram_width = 128;
		break;
	}
	if (tmp & R520_MC_CHANNEL_SIZE)
		rdev->mc.vram_width *= 2;
}
232 | 232 | ||
/*
 * Fill rdev->mc with VRAM type/width, the size already programmed in
 * CONFIG_MEMSIZE, and the PCI aperture (BAR 0) base and length.
 */
void r520_vram_info(struct radeon_device *rdev)
{
	r520_vram_get_type(rdev);
	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
241 | 241 | ||
242 | /* |
242 | /* |
243 | * Global GPU functions |
243 | * Global GPU functions |
244 | */ |
244 | */ |
/*
 * Disable legacy VGA emulation so the AVIVO display engine owns the
 * framebuffer.  Registers are addressed by raw offset here;
 * NOTE(review): 0x300..0x338 appear to be the VGA render/memory
 * control block and 0x310 the VGA memory base (it is written with
 * vram_location) — confirm against AVIVO register documentation.
 */
void rs600_disable_vga(struct radeon_device *rdev)
{
	unsigned tmp;
	dbgprintf("%s\n",__FUNCTION__);

	WREG32(0x330, 0);
	WREG32(0x338, 0);
	tmp = RREG32(0x300);
	tmp &= ~(3 << 16);
	WREG32(0x300, tmp);
	WREG32(0x308, (1 << 8));
	WREG32(0x310, rdev->mc.vram_location);
	WREG32(0x594, 0);
}
259 | 259 | ||
260 | 260 | ||
/*
 * Configure the active raster (GB) pipes: read the pipe count the
 * hardware reports, program the tile configuration and per-pipe
 * register destinations, and enable 2D destination-cache autoflush.
 */
void r420_pipes_init(struct radeon_device *rdev)
{
	unsigned tmp;
	unsigned gb_pipe_select;
	unsigned num_pipes;

	dbgprintf("%s\n",__FUNCTION__);

	/* GA_ENHANCE workaround TCL deadlock issue */
	WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
	/* get max number of pipes */
	gb_pipe_select = RREG32(0x402C);	/* GB_PIPE_SELECT */
	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
	rdev->num_gb_pipes = num_pipes;
	tmp = 0;
	/* Map the pipe count to the GB_TILE_CONFIG pipe-config field. */
	switch (num_pipes) {
	default:
		/* force to 1 pipe */
		num_pipes = 1;
		/* fall through */
	case 1:
		tmp = (0 << 1);
		break;
	case 2:
		tmp = (3 << 1);
		break;
	case 3:
		tmp = (6 << 1);
		break;
	case 4:
		tmp = (7 << 1);
		break;
	}
	/* SU_REG_DEST: one destination bit per active pipe. */
	WREG32(0x42C8, (1 << num_pipes) - 1);
	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	tmp |= (1 << 4) | (1 << 0);
	WREG32(0x4018, tmp);	/* GB_TILE_CONFIG */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* Set the top bit of DST_PIPE_CONFIG.
	 * NOTE(review): bit meaning not visible here — confirm vs. docs. */
	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       RREG32(R300_RB2D_DSTCACHE_MODE) |
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}
316 | 316 | ||
317 | 317 | ||
/*
 * Bring up AGP: acquire the bridge, determine a sane default transfer
 * mode (respecting the AGPMode quirk table and the radeon_agpmode
 * module parameter), enable AGP at that mode, and record the aperture
 * base/size in rdev->mc.
 *
 * Returns 0 on success or the negative error from the DRM AGP calls.
 * Compiled to a stub returning 0 when the OS has no AGP support.
 */
int radeon_agp_init(struct radeon_device *rdev)
{

	dbgprintf("%s\n",__FUNCTION__);

#if __OS_HAS_AGP
	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
	struct drm_agp_mode mode;
	struct drm_agp_info info;
	uint32_t agp_status;
	int default_mode;
	bool is_v3;
	int ret;

	/* Acquire AGP. */
	if (!rdev->ddev->agp->acquired) {
		ret = drm_agp_acquire(rdev->ddev);
		if (ret) {
			DRM_ERROR("Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(rdev->ddev, &info);
	if (ret) {
		DRM_ERROR("Unable to get AGP info: %d\n", ret);
		return ret;
	}
	mode.mode = info.mode;
	/* Intersect the chip's capabilities with the bridge's mode word. */
	agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);

	/* Pick the fastest mode both sides advertise. */
	if (is_v3) {
		default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
	} else {
		if (agp_status & RADEON_AGP_4X_MODE) {
			default_mode = 4;
		} else if (agp_status & RADEON_AGP_2X_MODE) {
			default_mode = 2;
		} else {
			default_mode = 1;
		}
	}

	/* Apply AGPMode Quirks: known-bad host bridge / chip / subsystem
	 * combinations get a forced default mode. */
	while (p && p->chip_device != 0) {
		if (info.id_vendor == p->hostbridge_vendor &&
		    info.id_device == p->hostbridge_device &&
		    rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			default_mode = p->default_mode;
		}
		++p;
	}

	/* Honor the user-requested mode if it is a valid power of two
	 * within the range for this AGP generation. */
	if (radeon_agpmode > 0) {
		if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
		    (radeon_agpmode > (is_v3 ? 8 : 4)) ||
		    (radeon_agpmode & (radeon_agpmode - 1))) {
			DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
				  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
				  default_mode);
			radeon_agpmode = default_mode;
		} else {
			DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
		}
	} else {
		radeon_agpmode = default_mode;
	}

	/* Encode the chosen rate back into the mode word. */
	mode.mode &= ~RADEON_AGP_MODE_MASK;
	if (is_v3) {
		switch (radeon_agpmode) {
		case 8:
			mode.mode |= RADEON_AGPv3_8X_MODE;
			break;
		case 4:
		default:
			mode.mode |= RADEON_AGPv3_4X_MODE;
			break;
		}
	} else {
		switch (radeon_agpmode) {
		case 4:
			mode.mode |= RADEON_AGP_4X_MODE;
			break;
		case 2:
			mode.mode |= RADEON_AGP_2X_MODE;
			break;
		case 1:
		default:
			mode.mode |= RADEON_AGP_1X_MODE;
			break;
		}
	}

	mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
	ret = drm_agp_enable(rdev->ddev, mode);
	if (ret) {
		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
		return ret;
	}

	/* Record the AGP aperture for MC programming (size is in MiB). */
	rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
	rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;

	/* workaround some hw issues */
	if (rdev->family < CHIP_R200) {
		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
	}
	return 0;
#else
	return 0;
#endif
}
435 | 435 | ||
436 | 436 | ||
/*
 * Quiesce the MC's display clients (both VGA engines and both CRTCs)
 * before the memory controller apertures are reprogrammed, then give
 * in-flight accesses a moment to drain.
 */
void rs600_mc_disable_clients(struct radeon_device *rdev)
{
	unsigned tmp;
	dbgprintf("%s\n",__FUNCTION__);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* Disable VGA mode on both display controllers. */
	tmp = RREG32(AVIVO_D1VGA_CONTROL);
	WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
	tmp = RREG32(AVIVO_D2VGA_CONTROL);
	WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

	/* Disable both CRTCs. */
	tmp = RREG32(AVIVO_D1CRTC_CONTROL);
	WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
	tmp = RREG32(AVIVO_D2CRTC_CONTROL);
	WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	/* make sure all previous write got through */
	tmp = RREG32(AVIVO_D2CRTC_CONTROL);

	mdelay(1);

	dbgprintf("done\n");

}
465 | 465 | ||
466 | 466 | ||
467 | 467 | ||
468 | 468 | ||
469 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
469 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
470 | 470 | ||
471 | 471 | ||
472 | 472 | ||
473 | 473 | ||
/*
 * Set up the fence driver: reserve a scratch register for fence
 * sequence writes and clear it, then reset the timeout counter.
 * The upstream driver's locking, list, wait-queue and debugfs setup
 * is commented out in this port.
 *
 * Returns 0 on success, or the error from radeon_scratch_get().
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;	/* NOTE(review): unused while the lock calls below remain commented out */
	int r;

//	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		DRM_ERROR("Fence failed to get a scratch register.");
//		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	/* Start the fence sequence at zero. */
	WREG32(rdev->fence_drv.scratch_reg, 0);
//	atomic_set(&rdev->fence_drv.seq, 0);
//	INIT_LIST_HEAD(&rdev->fence_drv.created);
//	INIT_LIST_HEAD(&rdev->fence_drv.emited);
//	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	rdev->fence_drv.count_timeout = 0;
//	init_waitqueue_head(&rdev->fence_drv.queue);
//	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
//	if (radeon_debugfs_fence_init(rdev)) {
//		DRM_ERROR("Failed to register debugfs file for fence !\n");
//	}
	return 0;
}
499 | 499 | ||
500 | 500 | ||
501 | 501 | ||
502 | 502 | ||
//domodedovo 9-00 16/07/2009