Rev 1275 | Rev 1321 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1275 | Rev 1313 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
14 | * all copies or substantial portions of the Software. |
15 | * |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
23 | * |
24 | * Authors: Dave Airlie |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
25 | * Alex Deucher |
26 | * Jerome Glisse |
26 | * Jerome Glisse |
27 | */ |
27 | */ |
28 | //#include |
28 | //#include |
29 | 29 | ||
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | #include "radeon_reg.h" |
33 | #include "radeon_reg.h" |
34 | #include "radeon.h" |
34 | #include "radeon.h" |
35 | #include "radeon_asic.h" |
35 | #include "radeon_asic.h" |
36 | #include "atom.h" |
36 | #include "atom.h" |
37 | 37 | ||
38 | #include |
38 | #include |
39 | 39 | ||
40 | 40 | ||
/* Module tunables (defaults; overridden from the command line). */
int radeon_dynclks = -1;          /* dynamic clock gating: -1 = auto */
int radeon_r4xx_atom = 0;         /* use ATOM BIOS paths on r4xx */
int radeon_agpmode = -1;          /* AGP mode override: -1 = auto */
int radeon_gart_size = 512;       /* default gart size */
int radeon_benchmarking = 0;
int radeon_connector_table = 0;
int radeon_tv = 0;
int radeon_modeset = 1;           /* kernel modesetting enabled by default */

/* NOTE(review): mode_t here appears to be the port's video-mode
 * descriptor, not the POSIX file-permission type — confirm against the
 * platform headers before relying on it. */
void parse_cmdline(char *cmdline, mode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, mode_t *mode);
int init_display_kms(struct radeon_device *rdev, mode_t *mode);

int get_modes(mode_t *mode, int *count);
int set_user_mode(mode_t *mode);


/* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08
66 | 66 | ||
67 | 67 | ||
68 | 68 | ||
69 | /* |
69 | /* |
70 | * Clear GPU surface registers. |
70 | * Clear GPU surface registers. |
71 | */ |
71 | */ |
72 | void radeon_surface_init(struct radeon_device *rdev) |
72 | void radeon_surface_init(struct radeon_device *rdev) |
73 | { |
73 | { |
74 | ENTER(); |
74 | ENTER(); |
75 | 75 | ||
76 | /* FIXME: check this out */ |
76 | /* FIXME: check this out */ |
77 | if (rdev->family < CHIP_R600) { |
77 | if (rdev->family < CHIP_R600) { |
78 | int i; |
78 | int i; |
79 | 79 | ||
80 | for (i = 0; i < 8; i++) { |
80 | for (i = 0; i < 8; i++) { |
81 | WREG32(RADEON_SURFACE0_INFO + |
81 | WREG32(RADEON_SURFACE0_INFO + |
82 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), |
82 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), |
83 | 0); |
83 | 0); |
84 | } |
84 | } |
85 | /* enable surfaces */ |
85 | /* enable surfaces */ |
86 | WREG32(RADEON_SURFACE_CNTL, 0); |
86 | WREG32(RADEON_SURFACE_CNTL, 0); |
87 | } |
87 | } |
88 | } |
88 | } |
89 | 89 | ||
90 | /* |
90 | /* |
91 | * GPU scratch registers helpers function. |
91 | * GPU scratch registers helpers function. |
92 | */ |
92 | */ |
93 | void radeon_scratch_init(struct radeon_device *rdev) |
93 | void radeon_scratch_init(struct radeon_device *rdev) |
94 | { |
94 | { |
95 | int i; |
95 | int i; |
96 | 96 | ||
97 | /* FIXME: check this out */ |
97 | /* FIXME: check this out */ |
98 | if (rdev->family < CHIP_R300) { |
98 | if (rdev->family < CHIP_R300) { |
99 | rdev->scratch.num_reg = 5; |
99 | rdev->scratch.num_reg = 5; |
100 | } else { |
100 | } else { |
101 | rdev->scratch.num_reg = 7; |
101 | rdev->scratch.num_reg = 7; |
102 | } |
102 | } |
103 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
103 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
104 | rdev->scratch.free[i] = true; |
104 | rdev->scratch.free[i] = true; |
105 | rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); |
105 | rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); |
106 | } |
106 | } |
107 | } |
107 | } |
108 | 108 | ||
109 | int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) |
109 | int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) |
110 | { |
110 | { |
111 | int i; |
111 | int i; |
112 | 112 | ||
113 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
113 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
114 | if (rdev->scratch.free[i]) { |
114 | if (rdev->scratch.free[i]) { |
115 | rdev->scratch.free[i] = false; |
115 | rdev->scratch.free[i] = false; |
116 | *reg = rdev->scratch.reg[i]; |
116 | *reg = rdev->scratch.reg[i]; |
117 | return 0; |
117 | return 0; |
118 | } |
118 | } |
119 | } |
119 | } |
120 | return -EINVAL; |
120 | return -EINVAL; |
121 | } |
121 | } |
122 | 122 | ||
123 | void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) |
123 | void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) |
124 | { |
124 | { |
125 | int i; |
125 | int i; |
126 | 126 | ||
127 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
127 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
128 | if (rdev->scratch.reg[i] == reg) { |
128 | if (rdev->scratch.reg[i] == reg) { |
129 | rdev->scratch.free[i] = true; |
129 | rdev->scratch.free[i] = true; |
130 | return; |
130 | return; |
131 | } |
131 | } |
132 | } |
132 | } |
133 | } |
133 | } |
134 | 134 | ||
135 | /* |
135 | /* |
136 | * MC common functions |
136 | * MC common functions |
137 | */ |
137 | */ |
138 | int radeon_mc_setup(struct radeon_device *rdev) |
138 | int radeon_mc_setup(struct radeon_device *rdev) |
139 | { |
139 | { |
140 | uint32_t tmp; |
140 | uint32_t tmp; |
141 | 141 | ||
142 | /* Some chips have an "issue" with the memory controller, the |
142 | /* Some chips have an "issue" with the memory controller, the |
143 | * location must be aligned to the size. We just align it down, |
143 | * location must be aligned to the size. We just align it down, |
144 | * too bad if we walk over the top of system memory, we don't |
144 | * too bad if we walk over the top of system memory, we don't |
145 | * use DMA without a remapped anyway. |
145 | * use DMA without a remapped anyway. |
146 | * Affected chips are rv280, all r3xx, and all r4xx, but not IGP |
146 | * Affected chips are rv280, all r3xx, and all r4xx, but not IGP |
147 | */ |
147 | */ |
148 | /* FGLRX seems to setup like this, VRAM a 0, then GART. |
148 | /* FGLRX seems to setup like this, VRAM a 0, then GART. |
149 | */ |
149 | */ |
150 | /* |
150 | /* |
151 | * Note: from R6xx the address space is 40bits but here we only |
151 | * Note: from R6xx the address space is 40bits but here we only |
152 | * use 32bits (still have to see a card which would exhaust 4G |
152 | * use 32bits (still have to see a card which would exhaust 4G |
153 | * address space). |
153 | * address space). |
154 | */ |
154 | */ |
155 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { |
155 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { |
156 | /* vram location was already setup try to put gtt after |
156 | /* vram location was already setup try to put gtt after |
157 | * if it fits */ |
157 | * if it fits */ |
158 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
158 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
159 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
159 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
160 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
160 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
161 | rdev->mc.gtt_location = tmp; |
161 | rdev->mc.gtt_location = tmp; |
162 | } else { |
162 | } else { |
163 | if (rdev->mc.gtt_size >= rdev->mc.vram_location) { |
163 | if (rdev->mc.gtt_size >= rdev->mc.vram_location) { |
164 | printk(KERN_ERR "[drm] GTT too big to fit " |
164 | printk(KERN_ERR "[drm] GTT too big to fit " |
165 | "before or after vram location.\n"); |
165 | "before or after vram location.\n"); |
166 | return -EINVAL; |
166 | return -EINVAL; |
167 | } |
167 | } |
168 | rdev->mc.gtt_location = 0; |
168 | rdev->mc.gtt_location = 0; |
169 | } |
169 | } |
170 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { |
170 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { |
171 | /* gtt location was already setup try to put vram before |
171 | /* gtt location was already setup try to put vram before |
172 | * if it fits */ |
172 | * if it fits */ |
173 | if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { |
173 | if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { |
174 | rdev->mc.vram_location = 0; |
174 | rdev->mc.vram_location = 0; |
175 | } else { |
175 | } else { |
176 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; |
176 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; |
177 | tmp += (rdev->mc.mc_vram_size - 1); |
177 | tmp += (rdev->mc.mc_vram_size - 1); |
178 | tmp &= ~(rdev->mc.mc_vram_size - 1); |
178 | tmp &= ~(rdev->mc.mc_vram_size - 1); |
179 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { |
179 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { |
180 | rdev->mc.vram_location = tmp; |
180 | rdev->mc.vram_location = tmp; |
181 | } else { |
181 | } else { |
182 | printk(KERN_ERR "[drm] vram too big to fit " |
182 | printk(KERN_ERR "[drm] vram too big to fit " |
183 | "before or after GTT location.\n"); |
183 | "before or after GTT location.\n"); |
184 | return -EINVAL; |
184 | return -EINVAL; |
185 | } |
185 | } |
186 | } |
186 | } |
187 | } else { |
187 | } else { |
188 | rdev->mc.vram_location = 0; |
188 | rdev->mc.vram_location = 0; |
189 | tmp = rdev->mc.mc_vram_size; |
189 | tmp = rdev->mc.mc_vram_size; |
190 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
190 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
191 | rdev->mc.gtt_location = tmp; |
191 | rdev->mc.gtt_location = tmp; |
192 | } |
192 | } |
193 | rdev->mc.vram_start = rdev->mc.vram_location; |
193 | rdev->mc.vram_start = rdev->mc.vram_location; |
194 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
194 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
195 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
195 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
196 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
196 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
197 | DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20)); |
197 | DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20)); |
198 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
198 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
199 | (unsigned)rdev->mc.vram_location, |
199 | (unsigned)rdev->mc.vram_location, |
200 | (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1)); |
200 | (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1)); |
201 | DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20)); |
201 | DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20)); |
202 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", |
202 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", |
203 | (unsigned)rdev->mc.gtt_location, |
203 | (unsigned)rdev->mc.gtt_location, |
204 | (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1)); |
204 | (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1)); |
205 | return 0; |
205 | return 0; |
206 | } |
206 | } |
207 | 207 | ||
208 | 208 | ||
209 | /* |
209 | /* |
210 | * GPU helpers function. |
210 | * GPU helpers function. |
211 | */ |
211 | */ |
212 | bool radeon_card_posted(struct radeon_device *rdev) |
212 | bool radeon_card_posted(struct radeon_device *rdev) |
213 | { |
213 | { |
214 | uint32_t reg; |
214 | uint32_t reg; |
215 | 215 | ||
216 | /* first check CRTCs */ |
216 | /* first check CRTCs */ |
217 | if (ASIC_IS_AVIVO(rdev)) { |
217 | if (ASIC_IS_AVIVO(rdev)) { |
218 | reg = RREG32(AVIVO_D1CRTC_CONTROL) | |
218 | reg = RREG32(AVIVO_D1CRTC_CONTROL) | |
219 | RREG32(AVIVO_D2CRTC_CONTROL); |
219 | RREG32(AVIVO_D2CRTC_CONTROL); |
220 | if (reg & AVIVO_CRTC_EN) { |
220 | if (reg & AVIVO_CRTC_EN) { |
221 | return true; |
221 | return true; |
222 | } |
222 | } |
223 | } else { |
223 | } else { |
224 | reg = RREG32(RADEON_CRTC_GEN_CNTL) | |
224 | reg = RREG32(RADEON_CRTC_GEN_CNTL) | |
225 | RREG32(RADEON_CRTC2_GEN_CNTL); |
225 | RREG32(RADEON_CRTC2_GEN_CNTL); |
226 | if (reg & RADEON_CRTC_EN) { |
226 | if (reg & RADEON_CRTC_EN) { |
227 | return true; |
227 | return true; |
228 | } |
228 | } |
229 | } |
229 | } |
230 | 230 | ||
231 | /* then check MEM_SIZE, in case the crtcs are off */ |
231 | /* then check MEM_SIZE, in case the crtcs are off */ |
232 | if (rdev->family >= CHIP_R600) |
232 | if (rdev->family >= CHIP_R600) |
233 | reg = RREG32(R600_CONFIG_MEMSIZE); |
233 | reg = RREG32(R600_CONFIG_MEMSIZE); |
234 | else |
234 | else |
235 | reg = RREG32(RADEON_CONFIG_MEMSIZE); |
235 | reg = RREG32(RADEON_CONFIG_MEMSIZE); |
236 | 236 | ||
237 | if (reg) |
237 | if (reg) |
238 | return true; |
238 | return true; |
239 | 239 | ||
240 | return false; |
240 | return false; |
241 | 241 | ||
242 | } |
242 | } |
243 | 243 | ||
244 | int radeon_dummy_page_init(struct radeon_device *rdev) |
244 | int radeon_dummy_page_init(struct radeon_device *rdev) |
245 | { |
245 | { |
246 | rdev->dummy_page.page = AllocPage(); |
246 | rdev->dummy_page.page = AllocPage(); |
247 | if (rdev->dummy_page.page == NULL) |
247 | if (rdev->dummy_page.page == NULL) |
248 | return -ENOMEM; |
248 | return -ENOMEM; |
249 | rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5); |
249 | rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5); |
250 | if (!rdev->dummy_page.addr) { |
250 | if (!rdev->dummy_page.addr) { |
251 | // __free_page(rdev->dummy_page.page); |
251 | // __free_page(rdev->dummy_page.page); |
252 | rdev->dummy_page.page = NULL; |
252 | rdev->dummy_page.page = NULL; |
253 | return -ENOMEM; |
253 | return -ENOMEM; |
254 | } |
254 | } |
255 | return 0; |
255 | return 0; |
256 | } |
256 | } |
257 | 257 | ||
258 | void radeon_dummy_page_fini(struct radeon_device *rdev) |
258 | void radeon_dummy_page_fini(struct radeon_device *rdev) |
259 | { |
259 | { |
260 | if (rdev->dummy_page.page == NULL) |
260 | if (rdev->dummy_page.page == NULL) |
261 | return; |
261 | return; |
262 | KernelFree(rdev->dummy_page.addr); |
262 | KernelFree(rdev->dummy_page.addr); |
263 | rdev->dummy_page.page = NULL; |
263 | rdev->dummy_page.page = NULL; |
264 | } |
264 | } |
265 | 265 | ||
266 | 266 | ||
/*
 * Registers accessors functions.
 */
/* Trap accessor: reaching this means the read accessor table was never
 * initialized for this register space — log the offset and BUG out. */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
    DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
    BUG_ON(1);
    return 0;
}
276 | 276 | ||
/* Trap accessor: reaching this means the write accessor table was never
 * initialized for this register space — log offset and value, then BUG. */
void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
              reg, v);
    BUG_ON(1);
}
283 | 283 | ||
284 | void radeon_register_accessor_init(struct radeon_device *rdev) |
284 | void radeon_register_accessor_init(struct radeon_device *rdev) |
285 | { |
285 | { |
286 | rdev->mc_rreg = &radeon_invalid_rreg; |
286 | rdev->mc_rreg = &radeon_invalid_rreg; |
287 | rdev->mc_wreg = &radeon_invalid_wreg; |
287 | rdev->mc_wreg = &radeon_invalid_wreg; |
288 | rdev->pll_rreg = &radeon_invalid_rreg; |
288 | rdev->pll_rreg = &radeon_invalid_rreg; |
289 | rdev->pll_wreg = &radeon_invalid_wreg; |
289 | rdev->pll_wreg = &radeon_invalid_wreg; |
290 | rdev->pciep_rreg = &radeon_invalid_rreg; |
290 | rdev->pciep_rreg = &radeon_invalid_rreg; |
291 | rdev->pciep_wreg = &radeon_invalid_wreg; |
291 | rdev->pciep_wreg = &radeon_invalid_wreg; |
292 | 292 | ||
293 | /* Don't change order as we are overridding accessor. */ |
293 | /* Don't change order as we are overridding accessor. */ |
294 | if (rdev->family < CHIP_RV515) { |
294 | if (rdev->family < CHIP_RV515) { |
295 | rdev->pcie_reg_mask = 0xff; |
295 | rdev->pcie_reg_mask = 0xff; |
296 | } else { |
296 | } else { |
297 | rdev->pcie_reg_mask = 0x7ff; |
297 | rdev->pcie_reg_mask = 0x7ff; |
298 | } |
298 | } |
299 | /* FIXME: not sure here */ |
299 | /* FIXME: not sure here */ |
300 | if (rdev->family <= CHIP_R580) { |
300 | if (rdev->family <= CHIP_R580) { |
301 | rdev->pll_rreg = &r100_pll_rreg; |
301 | rdev->pll_rreg = &r100_pll_rreg; |
302 | rdev->pll_wreg = &r100_pll_wreg; |
302 | rdev->pll_wreg = &r100_pll_wreg; |
303 | } |
303 | } |
304 | if (rdev->family >= CHIP_R420) { |
304 | if (rdev->family >= CHIP_R420) { |
305 | rdev->mc_rreg = &r420_mc_rreg; |
305 | rdev->mc_rreg = &r420_mc_rreg; |
306 | rdev->mc_wreg = &r420_mc_wreg; |
306 | rdev->mc_wreg = &r420_mc_wreg; |
307 | } |
307 | } |
308 | if (rdev->family >= CHIP_RV515) { |
308 | if (rdev->family >= CHIP_RV515) { |
309 | rdev->mc_rreg = &rv515_mc_rreg; |
309 | rdev->mc_rreg = &rv515_mc_rreg; |
310 | rdev->mc_wreg = &rv515_mc_wreg; |
310 | rdev->mc_wreg = &rv515_mc_wreg; |
311 | } |
311 | } |
312 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { |
312 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { |
313 | rdev->mc_rreg = &rs400_mc_rreg; |
313 | rdev->mc_rreg = &rs400_mc_rreg; |
314 | rdev->mc_wreg = &rs400_mc_wreg; |
314 | rdev->mc_wreg = &rs400_mc_wreg; |
315 | } |
315 | } |
316 | if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { |
316 | if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { |
317 | rdev->mc_rreg = &rs690_mc_rreg; |
317 | rdev->mc_rreg = &rs690_mc_rreg; |
318 | rdev->mc_wreg = &rs690_mc_wreg; |
318 | rdev->mc_wreg = &rs690_mc_wreg; |
319 | } |
319 | } |
320 | if (rdev->family == CHIP_RS600) { |
320 | if (rdev->family == CHIP_RS600) { |
321 | rdev->mc_rreg = &rs600_mc_rreg; |
321 | rdev->mc_rreg = &rs600_mc_rreg; |
322 | rdev->mc_wreg = &rs600_mc_wreg; |
322 | rdev->mc_wreg = &rs600_mc_wreg; |
323 | } |
323 | } |
324 | if (rdev->family >= CHIP_R600) { |
324 | if (rdev->family >= CHIP_R600) { |
325 | rdev->pciep_rreg = &r600_pciep_rreg; |
325 | rdev->pciep_rreg = &r600_pciep_rreg; |
326 | rdev->pciep_wreg = &r600_pciep_wreg; |
326 | rdev->pciep_wreg = &r600_pciep_wreg; |
327 | } |
327 | } |
328 | } |
328 | } |
329 | 329 | ||
330 | 330 | ||
331 | /* |
331 | /* |
332 | * ASIC |
332 | * ASIC |
333 | */ |
333 | */ |
334 | int radeon_asic_init(struct radeon_device *rdev) |
334 | int radeon_asic_init(struct radeon_device *rdev) |
335 | { |
335 | { |
336 | radeon_register_accessor_init(rdev); |
336 | radeon_register_accessor_init(rdev); |
337 | switch (rdev->family) { |
337 | switch (rdev->family) { |
338 | case CHIP_R100: |
338 | case CHIP_R100: |
339 | case CHIP_RV100: |
339 | case CHIP_RV100: |
340 | case CHIP_RS100: |
340 | case CHIP_RS100: |
341 | case CHIP_RV200: |
341 | case CHIP_RV200: |
342 | case CHIP_RS200: |
342 | case CHIP_RS200: |
343 | case CHIP_R200: |
343 | case CHIP_R200: |
344 | case CHIP_RV250: |
344 | case CHIP_RV250: |
345 | case CHIP_RS300: |
345 | case CHIP_RS300: |
346 | case CHIP_RV280: |
346 | case CHIP_RV280: |
347 | rdev->asic = &r100_asic; |
347 | rdev->asic = &r100_asic; |
348 | break; |
348 | break; |
349 | case CHIP_R300: |
349 | case CHIP_R300: |
350 | case CHIP_R350: |
350 | case CHIP_R350: |
351 | case CHIP_RV350: |
351 | case CHIP_RV350: |
352 | case CHIP_RV380: |
352 | case CHIP_RV380: |
353 | rdev->asic = &r300_asic; |
353 | rdev->asic = &r300_asic; |
354 | if (rdev->flags & RADEON_IS_PCIE) { |
354 | if (rdev->flags & RADEON_IS_PCIE) { |
355 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
355 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
356 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
356 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
357 | } |
357 | } |
358 | break; |
358 | break; |
359 | case CHIP_R420: |
359 | case CHIP_R420: |
360 | case CHIP_R423: |
360 | case CHIP_R423: |
361 | case CHIP_RV410: |
361 | case CHIP_RV410: |
362 | rdev->asic = &r420_asic; |
362 | rdev->asic = &r420_asic; |
363 | break; |
363 | break; |
364 | case CHIP_RS400: |
364 | case CHIP_RS400: |
365 | case CHIP_RS480: |
365 | case CHIP_RS480: |
366 | rdev->asic = &rs400_asic; |
366 | rdev->asic = &rs400_asic; |
367 | break; |
367 | break; |
368 | case CHIP_RS600: |
368 | case CHIP_RS600: |
369 | rdev->asic = &rs600_asic; |
369 | rdev->asic = &rs600_asic; |
370 | break; |
370 | break; |
371 | case CHIP_RS690: |
371 | case CHIP_RS690: |
372 | case CHIP_RS740: |
372 | case CHIP_RS740: |
373 | rdev->asic = &rs690_asic; |
373 | rdev->asic = &rs690_asic; |
374 | break; |
374 | break; |
375 | case CHIP_RV515: |
375 | case CHIP_RV515: |
376 | rdev->asic = &rv515_asic; |
376 | rdev->asic = &rv515_asic; |
377 | break; |
377 | break; |
378 | case CHIP_R520: |
378 | case CHIP_R520: |
379 | case CHIP_RV530: |
379 | case CHIP_RV530: |
380 | case CHIP_RV560: |
380 | case CHIP_RV560: |
381 | case CHIP_RV570: |
381 | case CHIP_RV570: |
382 | case CHIP_R580: |
382 | case CHIP_R580: |
383 | rdev->asic = &r520_asic; |
383 | rdev->asic = &r520_asic; |
384 | break; |
384 | break; |
385 | case CHIP_R600: |
385 | case CHIP_R600: |
386 | case CHIP_RV610: |
386 | case CHIP_RV610: |
387 | case CHIP_RV630: |
387 | case CHIP_RV630: |
388 | case CHIP_RV620: |
388 | case CHIP_RV620: |
389 | case CHIP_RV635: |
389 | case CHIP_RV635: |
390 | case CHIP_RV670: |
390 | case CHIP_RV670: |
391 | case CHIP_RS780: |
391 | case CHIP_RS780: |
392 | case CHIP_RS880: |
392 | case CHIP_RS880: |
393 | rdev->asic = &r600_asic; |
393 | rdev->asic = &r600_asic; |
394 | break; |
394 | break; |
395 | case CHIP_RV770: |
395 | case CHIP_RV770: |
396 | case CHIP_RV730: |
396 | case CHIP_RV730: |
397 | case CHIP_RV710: |
397 | case CHIP_RV710: |
398 | case CHIP_RV740: |
398 | case CHIP_RV740: |
399 | rdev->asic = &rv770_asic; |
399 | rdev->asic = &rv770_asic; |
400 | break; |
400 | break; |
401 | default: |
401 | default: |
402 | /* FIXME: not supported yet */ |
402 | /* FIXME: not supported yet */ |
403 | return -EINVAL; |
403 | return -EINVAL; |
404 | } |
404 | } |
405 | return 0; |
405 | return 0; |
406 | } |
406 | } |
407 | 407 | ||
408 | 408 | ||
409 | /* |
409 | /* |
410 | * Wrapper around modesetting bits. |
410 | * Wrapper around modesetting bits. |
411 | */ |
411 | */ |
412 | int radeon_clocks_init(struct radeon_device *rdev) |
412 | int radeon_clocks_init(struct radeon_device *rdev) |
413 | { |
413 | { |
414 | int r; |
414 | int r; |
415 | 415 | ||
416 | r = radeon_static_clocks_init(rdev->ddev); |
416 | r = radeon_static_clocks_init(rdev->ddev); |
417 | if (r) { |
417 | if (r) { |
418 | return r; |
418 | return r; |
419 | } |
419 | } |
420 | DRM_INFO("Clocks initialized !\n"); |
420 | DRM_INFO("Clocks initialized !\n"); |
421 | return 0; |
421 | return 0; |
422 | } |
422 | } |
423 | 423 | ||
/* Counterpart to radeon_clocks_init(); nothing to release. */
void radeon_clocks_fini(struct radeon_device *rdev)
{
}
427 | 427 | ||
428 | /* ATOM accessor methods */ |
428 | /* ATOM accessor methods */ |
429 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) |
429 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) |
430 | { |
430 | { |
431 | struct radeon_device *rdev = info->dev->dev_private; |
431 | struct radeon_device *rdev = info->dev->dev_private; |
432 | uint32_t r; |
432 | uint32_t r; |
433 | 433 | ||
434 | r = rdev->pll_rreg(rdev, reg); |
434 | r = rdev->pll_rreg(rdev, reg); |
435 | return r; |
435 | return r; |
436 | } |
436 | } |
437 | 437 | ||
438 | static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) |
438 | static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) |
439 | { |
439 | { |
440 | struct radeon_device *rdev = info->dev->dev_private; |
440 | struct radeon_device *rdev = info->dev->dev_private; |
441 | 441 | ||
442 | rdev->pll_wreg(rdev, reg, val); |
442 | rdev->pll_wreg(rdev, reg, val); |
443 | } |
443 | } |
444 | 444 | ||
445 | static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) |
445 | static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) |
446 | { |
446 | { |
447 | struct radeon_device *rdev = info->dev->dev_private; |
447 | struct radeon_device *rdev = info->dev->dev_private; |
448 | uint32_t r; |
448 | uint32_t r; |
449 | 449 | ||
450 | r = rdev->mc_rreg(rdev, reg); |
450 | r = rdev->mc_rreg(rdev, reg); |
451 | return r; |
451 | return r; |
452 | } |
452 | } |
453 | 453 | ||
454 | static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) |
454 | static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) |
455 | { |
455 | { |
456 | struct radeon_device *rdev = info->dev->dev_private; |
456 | struct radeon_device *rdev = info->dev->dev_private; |
457 | 457 | ||
458 | rdev->mc_wreg(rdev, reg, val); |
458 | rdev->mc_wreg(rdev, reg, val); |
459 | } |
459 | } |
460 | 460 | ||
461 | static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) |
461 | static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) |
462 | { |
462 | { |
463 | struct radeon_device *rdev = info->dev->dev_private; |
463 | struct radeon_device *rdev = info->dev->dev_private; |
464 | 464 | ||
465 | WREG32(reg*4, val); |
465 | WREG32(reg*4, val); |
466 | } |
466 | } |
467 | 467 | ||
468 | static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) |
468 | static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) |
469 | { |
469 | { |
470 | struct radeon_device *rdev = info->dev->dev_private; |
470 | struct radeon_device *rdev = info->dev->dev_private; |
471 | uint32_t r; |
471 | uint32_t r; |
472 | 472 | ||
473 | r = RREG32(reg*4); |
473 | r = RREG32(reg*4); |
474 | return r; |
474 | return r; |
475 | } |
475 | } |
476 | 476 | ||
477 | int radeon_atombios_init(struct radeon_device *rdev) |
477 | int radeon_atombios_init(struct radeon_device *rdev) |
478 | { |
478 | { |
479 | struct card_info *atom_card_info = |
479 | struct card_info *atom_card_info = |
480 | kzalloc(sizeof(struct card_info), GFP_KERNEL); |
480 | kzalloc(sizeof(struct card_info), GFP_KERNEL); |
481 | 481 | ||
482 | if (!atom_card_info) |
482 | if (!atom_card_info) |
483 | return -ENOMEM; |
483 | return -ENOMEM; |
484 | 484 | ||
485 | rdev->mode_info.atom_card_info = atom_card_info; |
485 | rdev->mode_info.atom_card_info = atom_card_info; |
486 | atom_card_info->dev = rdev->ddev; |
486 | atom_card_info->dev = rdev->ddev; |
487 | atom_card_info->reg_read = cail_reg_read; |
487 | atom_card_info->reg_read = cail_reg_read; |
488 | atom_card_info->reg_write = cail_reg_write; |
488 | atom_card_info->reg_write = cail_reg_write; |
489 | atom_card_info->mc_read = cail_mc_read; |
489 | atom_card_info->mc_read = cail_mc_read; |
490 | atom_card_info->mc_write = cail_mc_write; |
490 | atom_card_info->mc_write = cail_mc_write; |
491 | atom_card_info->pll_read = cail_pll_read; |
491 | atom_card_info->pll_read = cail_pll_read; |
492 | atom_card_info->pll_write = cail_pll_write; |
492 | atom_card_info->pll_write = cail_pll_write; |
493 | 493 | ||
494 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); |
494 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); |
495 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
495 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
496 | return 0; |
496 | return 0; |
497 | } |
497 | } |
498 | 498 | ||
499 | void radeon_atombios_fini(struct radeon_device *rdev) |
499 | void radeon_atombios_fini(struct radeon_device *rdev) |
500 | { |
500 | { |
501 | kfree(rdev->mode_info.atom_context); |
501 | kfree(rdev->mode_info.atom_context); |
502 | kfree(rdev->mode_info.atom_card_info); |
502 | kfree(rdev->mode_info.atom_card_info); |
503 | } |
503 | } |
504 | 504 | ||
505 | int radeon_combios_init(struct radeon_device *rdev) |
505 | int radeon_combios_init(struct radeon_device *rdev) |
506 | { |
506 | { |
507 | radeon_combios_initialize_bios_scratch_regs(rdev->ddev); |
507 | radeon_combios_initialize_bios_scratch_regs(rdev->ddev); |
508 | return 0; |
508 | return 0; |
509 | } |
509 | } |
510 | 510 | ||
/* Counterpart of radeon_combios_init(). Intentionally empty: the
 * combios init path allocates nothing that needs to be released. */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
514 | 514 | ||
/* if we get transitioned to only one device, take VGA back */
516 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) |
516 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) |
517 | { |
517 | { |
518 | struct radeon_device *rdev = cookie; |
518 | struct radeon_device *rdev = cookie; |
519 | radeon_vga_set_state(rdev, state); |
519 | radeon_vga_set_state(rdev, state); |
520 | if (state) |
520 | if (state) |
521 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
521 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
522 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
522 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
523 | else |
523 | else |
524 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
524 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
525 | } |
525 | } |
526 | 526 | ||
/* Drop the AGP flag and reroute the device onto its native PCI(E) GART.
 * Called when AGP is disabled on the command line or fails to work.
 * NOTE(review): the >= comparisons assume the CHIP_* enum is ordered by
 * GPU generation — confirm against the radeon_family definition. */
void radeon_agp_disable(struct radeon_device *rdev)
{
	rdev->flags &= ~RADEON_IS_AGP;
	if (rdev->family >= CHIP_R600) {
		/* R600 and newer: always a PCIE core, no asic hooks to swap */
		DRM_INFO("Forcing AGP to PCIE mode\n");
		rdev->flags |= RADEON_IS_PCIE;
	} else if (rdev->family >= CHIP_RV515 ||
		   rdev->family == CHIP_RV380 ||
		   rdev->family == CHIP_RV410 ||
		   rdev->family == CHIP_R423) {
		/* RV515+ and the listed R4xx variants have a PCIE bridge:
		 * switch the GART callbacks to the rv370 PCIE versions */
		DRM_INFO("Forcing AGP to PCIE mode\n");
		rdev->flags |= RADEON_IS_PCIE;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	} else {
		/* everything else falls back to the plain PCI GART */
		DRM_INFO("Forcing AGP to PCI mode\n");
		rdev->flags |= RADEON_IS_PCI;
		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
	}
}
548 | 548 | ||
549 | /* |
549 | /* |
550 | * Radeon device. |
550 | * Radeon device. |
551 | */ |
551 | */ |
/* Low-level one-time initialization of a radeon device: fill in the
 * device structure, hook up the per-asic function table, choose the DMA
 * mask, map the MMIO register BAR and run the asic init path.
 * Returns 0 on success or a negative errno on fatal failure
 * (allocation, iomapping or MC/asic initialization). */
int radeon_device_init(struct radeon_device *rdev,
	       struct drm_device *ddev,
	       struct pci_dev *pdev,
	       uint32_t flags)
{
	int r;
	int dma_bits;

	DRM_INFO("radeon: Initializing kernel modesetting.\n");
	rdev->shutdown = false;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	/* the chip family is packed into the low bits of driver_data flags */
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* radeon_gart_size module option is in MiB */
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;
	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
//	mutex_init(&rdev->cs_mutex);
//	mutex_init(&rdev->ib_pool.mutex);
//	mutex_init(&rdev->cp.mutex);
//	rwlock_init(&rdev->fence_drv.lock);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r) {
		return r;
	}

	/* agpmode == -1 on the command line means "disable AGP" */
	if (radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* non-fatal: warn and continue with whatever mask is active */
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);

	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);

	/* map the register BAR uncached; MapIoMem is the host OS
	 * equivalent of ioremap() here */
	rdev->rmmio = (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
				      PG_SW+PG_NOCACHE);

	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
//	r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
//	if (r) {
//		return -EINVAL;
//	}

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_gpu_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
//	if (radeon_testing) {
//		radeon_test_moves(rdev);
//	}
//	if (radeon_benchmarking) {
//		radeon_benchmark(rdev);
//	}
	return 0;
}
650 | 650 | ||
651 | 651 | ||
652 | /* |
652 | /* |
653 | * Driver load/unload |
653 | * Driver load/unload |
654 | */ |
654 | */ |
655 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) |
655 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) |
656 | { |
656 | { |
657 | struct radeon_device *rdev; |
657 | struct radeon_device *rdev; |
658 | int r; |
658 | int r; |
659 | 659 | ||
660 | ENTER(); |
660 | ENTER(); |
661 | 661 | ||
662 | rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); |
662 | rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); |
663 | if (rdev == NULL) { |
663 | if (rdev == NULL) { |
664 | return -ENOMEM; |
664 | return -ENOMEM; |
665 | }; |
665 | }; |
666 | 666 | ||
667 | dev->dev_private = (void *)rdev; |
667 | dev->dev_private = (void *)rdev; |
668 | 668 | ||
669 | /* update BUS flag */ |
669 | /* update BUS flag */ |
670 | if (drm_device_is_agp(dev)) { |
670 | if (drm_device_is_agp(dev)) { |
671 | flags |= RADEON_IS_AGP; |
671 | flags |= RADEON_IS_AGP; |
672 | } else if (drm_device_is_pcie(dev)) { |
672 | } else if (drm_device_is_pcie(dev)) { |
673 | flags |= RADEON_IS_PCIE; |
673 | flags |= RADEON_IS_PCIE; |
674 | } else { |
674 | } else { |
675 | flags |= RADEON_IS_PCI; |
675 | flags |= RADEON_IS_PCI; |
676 | } |
676 | } |
677 | 677 | ||
678 | /* radeon_device_init should report only fatal error |
678 | /* radeon_device_init should report only fatal error |
679 | * like memory allocation failure or iomapping failure, |
679 | * like memory allocation failure or iomapping failure, |
680 | * or memory manager initialization failure, it must |
680 | * or memory manager initialization failure, it must |
681 | * properly initialize the GPU MC controller and permit |
681 | * properly initialize the GPU MC controller and permit |
682 | * VRAM allocation |
682 | * VRAM allocation |
683 | */ |
683 | */ |
684 | r = radeon_device_init(rdev, dev, dev->pdev, flags); |
684 | r = radeon_device_init(rdev, dev, dev->pdev, flags); |
685 | if (r) { |
685 | if (r) { |
686 | DRM_ERROR("Fatal error while trying to initialize radeon.\n"); |
686 | DRM_ERROR("Fatal error while trying to initialize radeon.\n"); |
687 | return r; |
687 | return r; |
688 | } |
688 | } |
689 | /* Again modeset_init should fail only on fatal error |
689 | /* Again modeset_init should fail only on fatal error |
690 | * otherwise it should provide enough functionalities |
690 | * otherwise it should provide enough functionalities |
691 | * for shadowfb to run |
691 | * for shadowfb to run |
692 | */ |
692 | */ |
693 | if( radeon_modeset ) |
693 | if( radeon_modeset ) |
694 | { |
694 | { |
695 | r = radeon_modeset_init(rdev); |
695 | r = radeon_modeset_init(rdev); |
696 | if (r) { |
696 | if (r) { |
697 | return r; |
697 | return r; |
698 | } |
698 | } |
699 | }; |
699 | }; |
700 | return 0; |
700 | return 0; |
701 | } |
701 | } |
702 | 702 | ||
mode_t usermode;	/* video mode requested by the user, parsed from the command line */
704 | 704 | ||
705 | 705 | ||
706 | int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent) |
706 | int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent) |
707 | { |
707 | { |
708 | static struct drm_device *dev; |
708 | static struct drm_device *dev; |
709 | int ret; |
709 | int ret; |
710 | 710 | ||
711 | ENTER(); |
711 | ENTER(); |
712 | 712 | ||
713 | dev = kzalloc(sizeof(*dev), 0); |
713 | dev = kzalloc(sizeof(*dev), 0); |
714 | if (!dev) |
714 | if (!dev) |
715 | return -ENOMEM; |
715 | return -ENOMEM; |
716 | 716 | ||
717 | // ret = pci_enable_device(pdev); |
717 | // ret = pci_enable_device(pdev); |
718 | // if (ret) |
718 | // if (ret) |
719 | // goto err_g1; |
719 | // goto err_g1; |
720 | 720 | ||
721 | // pci_set_master(pdev); |
721 | // pci_set_master(pdev); |
722 | 722 | ||
723 | // if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { |
723 | // if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { |
724 | // printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); |
724 | // printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); |
725 | // goto err_g2; |
725 | // goto err_g2; |
726 | // } |
726 | // } |
727 | 727 | ||
728 | dev->pdev = pdev; |
728 | dev->pdev = pdev; |
729 | dev->pci_device = pdev->device; |
729 | dev->pci_device = pdev->device; |
730 | dev->pci_vendor = pdev->vendor; |
730 | dev->pci_vendor = pdev->vendor; |
731 | 731 | ||
732 | ret = radeon_driver_load_kms(dev, ent->driver_data ); |
732 | ret = radeon_driver_load_kms(dev, ent->driver_data ); |
733 | if (ret) |
733 | if (ret) |
734 | goto err_g4; |
734 | goto err_g4; |
735 | 735 | ||
736 | // list_add_tail(&dev->driver_item, &driver->device_list); |
736 | // list_add_tail(&dev->driver_item, &driver->device_list); |
737 | 737 | ||
738 | // DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", |
738 | // DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", |
739 | // driver->name, driver->major, driver->minor, driver->patchlevel, |
739 | // driver->name, driver->major, driver->minor, driver->patchlevel, |
740 | // driver->date, pci_name(pdev), dev->primary->index); |
740 | // driver->date, pci_name(pdev), dev->primary->index); |
741 | 741 | ||
742 | if( radeon_modeset ) |
742 | if( radeon_modeset ) |
743 | init_display_kms(dev->dev_private, &usermode); |
743 | init_display_kms(dev->dev_private, &usermode); |
744 | else |
744 | else |
745 | init_display(dev->dev_private, &usermode); |
745 | init_display(dev->dev_private, &usermode); |
746 | 746 | ||
747 | LEAVE(); |
747 | LEAVE(); |
748 | 748 | ||
749 | return 0; |
749 | return 0; |
750 | 750 | ||
751 | err_g4: |
751 | err_g4: |
752 | // drm_put_minor(&dev->primary); |
752 | // drm_put_minor(&dev->primary); |
753 | //err_g3: |
753 | //err_g3: |
754 | // if (drm_core_check_feature(dev, DRIVER_MODESET)) |
754 | // if (drm_core_check_feature(dev, DRIVER_MODESET)) |
755 | // drm_put_minor(&dev->control); |
755 | // drm_put_minor(&dev->control); |
756 | //err_g2: |
756 | //err_g2: |
757 | // pci_disable_device(pdev); |
757 | // pci_disable_device(pdev); |
758 | //err_g1: |
758 | //err_g1: |
759 | free(dev); |
759 | free(dev); |
760 | 760 | ||
761 | LEAVE(); |
761 | LEAVE(); |
762 | 762 | ||
763 | return ret; |
763 | return ret; |
764 | } |
764 | } |
765 | 765 | ||
766 | resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource) |
766 | resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource) |
767 | { |
767 | { |
768 | return pci_resource_start(dev->pdev, resource); |
768 | return pci_resource_start(dev->pdev, resource); |
769 | } |
769 | } |
770 | 770 | ||
771 | resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource) |
771 | resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource) |
772 | { |
772 | { |
773 | return pci_resource_len(dev->pdev, resource); |
773 | return pci_resource_len(dev->pdev, resource); |
774 | } |
774 | } |
775 | 775 | ||
776 | 776 | ||
/* 64-by-32-bit unsigned division helper (classic do_div fallback):
 * divide *n by base in place, storing the quotient back into *n, and
 * return the remainder (always < base, so it fits in 32 bits). */
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t dividend = *n;
	uint64_t divisor = base;
	uint64_t quot = 0;
	uint64_t bit = 1;
	uint32_t top = dividend >> 32;

	/* Fast path: knock the high 32 bits down below base first, so the
	 * shift-subtract loop below only has to cover the remainder. */
	if (top >= base) {
		top /= base;
		quot = (uint64_t)top << 32;
		/* 32-bit product is safe: top*base <= original high word */
		dividend -= (uint64_t)(top * base) << 32;
	}

	/* Scale the divisor up to just below the dividend, tracking the
	 * matching quotient bit. The sign test stops before overflow. */
	while ((int64_t)divisor > 0 && divisor < dividend) {
		divisor <<= 1;
		bit <<= 1;
	}

	/* Long division: subtract scaled divisors while shifting back down. */
	do {
		if (dividend >= divisor) {
			dividend -= divisor;
			quot += bit;
		}
		divisor >>= 1;
		bit >>= 1;
	} while (bit);

	*n = quot;
	return dividend;
}
809 | 809 | ||
810 | 810 | ||
/* PCI vendor/device IDs this driver binds to, expanded from the shared
 * radeon_PCI_IDS table macro. */
static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};
814 | 814 | ||
815 | 815 | ||
/* Version of the display ioctl interface, returned by SRV_GETVERSION. */
#define API_VERSION     0x01000100

/* io_code values accepted by display_handler() below. */
#define SRV_GETVERSION  0	/* query API_VERSION */
#define SRV_ENUM_MODES  1	/* enumerate available video modes */
#define SRV_SET_MODE    2	/* switch to a caller-supplied mode */
821 | 821 | ||
822 | int _stdcall display_handler(ioctl_t *io) |
822 | int _stdcall display_handler(ioctl_t *io) |
823 | { |
823 | { |
824 | int retval = -1; |
824 | int retval = -1; |
825 | u32_t *inp; |
825 | u32_t *inp; |
826 | u32_t *outp; |
826 | u32_t *outp; |
827 | 827 | ||
828 | inp = io->input; |
828 | inp = io->input; |
829 | outp = io->output; |
829 | outp = io->output; |
830 | 830 | ||
831 | switch(io->io_code) |
831 | switch(io->io_code) |
832 | { |
832 | { |
833 | case SRV_GETVERSION: |
833 | case SRV_GETVERSION: |
834 | if(io->out_size==4) |
834 | if(io->out_size==4) |
835 | { |
835 | { |
836 | *outp = API_VERSION; |
836 | *outp = API_VERSION; |
837 | retval = 0; |
837 | retval = 0; |
838 | } |
838 | } |
839 | break; |
839 | break; |
840 | 840 | ||
841 | case SRV_ENUM_MODES: |
841 | case SRV_ENUM_MODES: |
842 | dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n", |
842 | dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n", |
843 | inp, io->inp_size, io->out_size ); |
843 | inp, io->inp_size, io->out_size ); |
844 | 844 | ||
845 | if( radeon_modeset && |
845 | if( radeon_modeset && |
846 | (outp != NULL) && (io->out_size == 4) && |
846 | (outp != NULL) && (io->out_size == 4) && |
847 | (io->inp_size == *outp * sizeof(mode_t)) ) |
847 | (io->inp_size == *outp * sizeof(mode_t)) ) |
848 | { |
848 | { |
849 | retval = get_modes((mode_t*)inp, outp); |
849 | retval = get_modes((mode_t*)inp, outp); |
850 | }; |
850 | }; |
851 | break; |
851 | break; |
852 | 852 | ||
853 | case SRV_SET_MODE: |
853 | case SRV_SET_MODE: |
854 | dbgprintf("SRV_SET_MODE inp %x inp_size %x\n", |
854 | dbgprintf("SRV_SET_MODE inp %x inp_size %x\n", |
855 | inp, io->inp_size); |
855 | inp, io->inp_size); |
856 | 856 | ||
857 | if( radeon_modeset && |
857 | if( radeon_modeset && |
858 | (inp != NULL) && |
858 | (inp != NULL) && |
859 | (io->inp_size == sizeof(mode_t)) ) |
859 | (io->inp_size == sizeof(mode_t)) ) |
860 | { |
860 | { |
861 | retval = set_user_mode((mode_t*)inp); |
861 | retval = set_user_mode((mode_t*)inp); |
862 | }; |
862 | }; |
863 | break; |
863 | break; |
864 | }; |
864 | }; |
865 | 865 | ||
866 | return retval; |
866 | return retval; |
867 | } |
867 | } |
868 | 868 | ||
static char log[256];	/* path of the debug log file, set from the command line */
static dev_t device;	/* PCI device filled in by find_pci_device() at load time */
871 | 871 | ||
872 | u32_t drvEntry(int action, char *cmdline) |
872 | u32_t drvEntry(int action, char *cmdline) |
873 | { |
873 | { |
874 | struct pci_device_id *ent; |
874 | struct pci_device_id *ent; |
875 | 875 | ||
876 | int err; |
876 | int err; |
877 | u32_t retval = 0; |
877 | u32_t retval = 0; |
878 | 878 | ||
879 | if(action != 1) |
879 | if(action != 1) |
880 | return 0; |
880 | return 0; |
881 | 881 | ||
882 | if( GetService("DISPLAY") != 0 ) |
882 | if( GetService("DISPLAY") != 0 ) |
883 | return 0; |
883 | return 0; |
884 | 884 | ||
885 | if( cmdline && *cmdline ) |
885 | if( cmdline && *cmdline ) |
886 | parse_cmdline(cmdline, &usermode, log, &radeon_modeset); |
886 | parse_cmdline(cmdline, &usermode, log, &radeon_modeset); |
887 | 887 | ||
888 | if(!dbg_open(log)) |
888 | if(!dbg_open(log)) |
889 | { |
889 | { |
890 | strcpy(log, "/rd/1/drivers/atikms.log"); |
890 | strcpy(log, "/rd/1/drivers/atikms.log"); |
891 | 891 | ||
892 | if(!dbg_open(log)) |
892 | if(!dbg_open(log)) |
893 | { |
893 | { |
894 | printf("Can't open %s\nExit\n", log); |
894 | printf("Can't open %s\nExit\n", log); |
895 | return 0; |
895 | return 0; |
896 | }; |
896 | }; |
897 | } |
897 | } |
898 | dbgprintf("Radeon RC07 cmdline %s\n", cmdline); |
898 | dbgprintf("Radeon RC08 cmdline %s\n", cmdline); |
899 | 899 | ||
900 | enum_pci_devices(); |
900 | enum_pci_devices(); |
901 | 901 | ||
902 | ent = find_pci_device(&device, pciidlist); |
902 | ent = find_pci_device(&device, pciidlist); |
903 | 903 | ||
904 | if( unlikely(ent == NULL) ) |
904 | if( unlikely(ent == NULL) ) |
905 | { |
905 | { |
906 | dbgprintf("device not found\n"); |
906 | dbgprintf("device not found\n"); |
907 | return 0; |
907 | return 0; |
908 | }; |
908 | }; |
909 | 909 | ||
910 | dbgprintf("device %x:%x\n", device.pci_dev.vendor, |
910 | dbgprintf("device %x:%x\n", device.pci_dev.vendor, |
911 | device.pci_dev.device); |
911 | device.pci_dev.device); |
912 | 912 | ||
913 | err = drm_get_dev(&device.pci_dev, ent); |
913 | err = drm_get_dev(&device.pci_dev, ent); |
914 | 914 | ||
915 | err = RegService("DISPLAY", display_handler); |
915 | err = RegService("DISPLAY", display_handler); |
916 | 916 | ||
917 | if( err != 0) |
917 | if( err != 0) |
918 | dbgprintf("Set DISPLAY handler\n"); |
918 | dbgprintf("Set DISPLAY handler\n"); |
919 | 919 | ||
920 | return err; |
920 | return err; |
921 | };>><>><>=>>>>>>>>> |
921 | };>><>><>=>>>>>>>>> |