Rev 1321 | Rev 1404 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1321 | Rev 1403 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
14 | * all copies or substantial portions of the Software. |
15 | * |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
23 | * |
24 | * Authors: Dave Airlie |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
25 | * Alex Deucher |
26 | * Jerome Glisse |
26 | * Jerome Glisse |
27 | */ |
27 | */ |
28 | /* RS600 / Radeon X1250/X1270 integrated GPU |
28 | /* RS600 / Radeon X1250/X1270 integrated GPU |
29 | * |
29 | * |
30 | * This file gather function specific to RS600 which is the IGP of |
30 | * This file gather function specific to RS600 which is the IGP of |
31 | * the X1250/X1270 family supporting intel CPU (while RS690/RS740 |
31 | * the X1250/X1270 family supporting intel CPU (while RS690/RS740 |
32 | * is the X1250/X1270 supporting AMD CPU). The display engine are |
32 | * is the X1250/X1270 supporting AMD CPU). The display engine are |
33 | * the avivo one, bios is an atombios, 3D block are the one of the |
33 | * the avivo one, bios is an atombios, 3D block are the one of the |
34 | * R4XX family. The GART is different from the RS400 one and is very |
34 | * R4XX family. The GART is different from the RS400 one and is very |
35 | * close to the one of the R600 family (R600 likely being an evolution |
35 | * close to the one of the R600 family (R600 likely being an evolution |
36 | * of the RS600 GART block). |
36 | * of the RS600 GART block). |
37 | */ |
37 | */ |
38 | #include "drmP.h" |
38 | #include "drmP.h" |
39 | #include "radeon.h" |
39 | #include "radeon.h" |
40 | #include "atom.h" |
40 | #include "atom.h" |
41 | #include "rs600d.h" |
41 | #include "rs600d.h" |
42 | 42 | ||
43 | #include "rs600_reg_safe.h" |
43 | #include "rs600_reg_safe.h" |
44 | 44 | ||
45 | void rs600_gpu_init(struct radeon_device *rdev); |
45 | void rs600_gpu_init(struct radeon_device *rdev); |
46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
47 | 47 | ||
48 | int rs600_mc_init(struct radeon_device *rdev) |
48 | int rs600_mc_init(struct radeon_device *rdev) |
49 | { |
49 | { |
50 | /* read back the MC value from the hw */ |
50 | /* read back the MC value from the hw */ |
51 | int r; |
51 | int r; |
52 | u32 tmp; |
52 | u32 tmp; |
53 | 53 | ||
54 | /* Setup GPU memory space */ |
54 | /* Setup GPU memory space */ |
55 | tmp = RREG32_MC(R_000004_MC_FB_LOCATION); |
55 | tmp = RREG32_MC(R_000004_MC_FB_LOCATION); |
56 | rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; |
56 | rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; |
57 | rdev->mc.gtt_location = 0xffffffffUL; |
57 | rdev->mc.gtt_location = 0xffffffffUL; |
58 | r = radeon_mc_setup(rdev); |
58 | r = radeon_mc_setup(rdev); |
- | 59 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
|
59 | if (r) |
60 | if (r) |
60 | return r; |
61 | return r; |
61 | return 0; |
62 | return 0; |
62 | } |
63 | } |
63 | 64 | ||
64 | /* hpd for digital panel detect/disconnect */ |
65 | /* hpd for digital panel detect/disconnect */ |
65 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
66 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
66 | { |
67 | { |
67 | u32 tmp; |
68 | u32 tmp; |
68 | bool connected = false; |
69 | bool connected = false; |
69 | 70 | ||
70 | switch (hpd) { |
71 | switch (hpd) { |
71 | case RADEON_HPD_1: |
72 | case RADEON_HPD_1: |
72 | tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS); |
73 | tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS); |
73 | if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp)) |
74 | if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp)) |
74 | connected = true; |
75 | connected = true; |
75 | break; |
76 | break; |
76 | case RADEON_HPD_2: |
77 | case RADEON_HPD_2: |
77 | tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS); |
78 | tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS); |
78 | if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp)) |
79 | if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp)) |
79 | connected = true; |
80 | connected = true; |
80 | break; |
81 | break; |
81 | default: |
82 | default: |
82 | break; |
83 | break; |
83 | } |
84 | } |
84 | return connected; |
85 | return connected; |
85 | } |
86 | } |
86 | 87 | ||
87 | void rs600_hpd_set_polarity(struct radeon_device *rdev, |
88 | void rs600_hpd_set_polarity(struct radeon_device *rdev, |
88 | enum radeon_hpd_id hpd) |
89 | enum radeon_hpd_id hpd) |
89 | { |
90 | { |
90 | u32 tmp; |
91 | u32 tmp; |
91 | bool connected = rs600_hpd_sense(rdev, hpd); |
92 | bool connected = rs600_hpd_sense(rdev, hpd); |
92 | 93 | ||
93 | switch (hpd) { |
94 | switch (hpd) { |
94 | case RADEON_HPD_1: |
95 | case RADEON_HPD_1: |
95 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); |
96 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); |
96 | if (connected) |
97 | if (connected) |
97 | tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); |
98 | tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); |
98 | else |
99 | else |
99 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); |
100 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); |
100 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
101 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
101 | break; |
102 | break; |
102 | case RADEON_HPD_2: |
103 | case RADEON_HPD_2: |
103 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); |
104 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); |
104 | if (connected) |
105 | if (connected) |
105 | tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); |
106 | tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); |
106 | else |
107 | else |
107 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); |
108 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); |
108 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
109 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
109 | break; |
110 | break; |
110 | default: |
111 | default: |
111 | break; |
112 | break; |
112 | } |
113 | } |
113 | } |
114 | } |
114 | 115 | ||
115 | void rs600_hpd_init(struct radeon_device *rdev) |
116 | void rs600_hpd_init(struct radeon_device *rdev) |
116 | { |
117 | { |
117 | struct drm_device *dev = rdev->ddev; |
118 | struct drm_device *dev = rdev->ddev; |
118 | struct drm_connector *connector; |
119 | struct drm_connector *connector; |
119 | 120 | ||
120 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
121 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
121 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
122 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
122 | switch (radeon_connector->hpd.hpd) { |
123 | switch (radeon_connector->hpd.hpd) { |
123 | case RADEON_HPD_1: |
124 | case RADEON_HPD_1: |
124 | WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, |
125 | WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, |
125 | S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); |
126 | S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); |
126 | rdev->irq.hpd[0] = true; |
127 | // rdev->irq.hpd[0] = true; |
127 | break; |
128 | break; |
128 | case RADEON_HPD_2: |
129 | case RADEON_HPD_2: |
129 | WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, |
130 | WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, |
130 | S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); |
131 | S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); |
131 | rdev->irq.hpd[1] = true; |
132 | // rdev->irq.hpd[1] = true; |
132 | break; |
133 | break; |
133 | default: |
134 | default: |
134 | break; |
135 | break; |
135 | } |
136 | } |
136 | } |
137 | } |
- | 138 | // if (rdev->irq.installed) |
|
137 | rs600_irq_set(rdev); |
139 | // rs600_irq_set(rdev); |
138 | } |
140 | } |
139 | 141 | ||
140 | void rs600_hpd_fini(struct radeon_device *rdev) |
142 | void rs600_hpd_fini(struct radeon_device *rdev) |
141 | { |
143 | { |
142 | struct drm_device *dev = rdev->ddev; |
144 | struct drm_device *dev = rdev->ddev; |
143 | struct drm_connector *connector; |
145 | struct drm_connector *connector; |
144 | 146 | ||
145 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
147 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
146 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
148 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
147 | switch (radeon_connector->hpd.hpd) { |
149 | switch (radeon_connector->hpd.hpd) { |
148 | case RADEON_HPD_1: |
150 | case RADEON_HPD_1: |
149 | WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, |
151 | WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, |
150 | S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); |
152 | S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); |
151 | rdev->irq.hpd[0] = false; |
153 | // rdev->irq.hpd[0] = false; |
152 | break; |
154 | break; |
153 | case RADEON_HPD_2: |
155 | case RADEON_HPD_2: |
154 | WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, |
156 | WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, |
155 | S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); |
157 | S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); |
156 | rdev->irq.hpd[1] = false; |
158 | // rdev->irq.hpd[1] = false; |
157 | break; |
159 | break; |
158 | default: |
160 | default: |
159 | break; |
161 | break; |
160 | } |
162 | } |
161 | } |
163 | } |
162 | } |
164 | } |
163 | 165 | ||
164 | /* |
166 | /* |
165 | * GART. |
167 | * GART. |
166 | */ |
168 | */ |
167 | void rs600_gart_tlb_flush(struct radeon_device *rdev) |
169 | void rs600_gart_tlb_flush(struct radeon_device *rdev) |
168 | { |
170 | { |
169 | uint32_t tmp; |
171 | uint32_t tmp; |
170 | 172 | ||
171 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
173 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
172 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
174 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
173 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
175 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
174 | 176 | ||
175 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
177 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
176 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); |
178 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); |
177 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
179 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
178 | 180 | ||
179 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
181 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
180 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
182 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
181 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
183 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
182 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
184 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
183 | } |
185 | } |
184 | 186 | ||
185 | int rs600_gart_init(struct radeon_device *rdev) |
187 | int rs600_gart_init(struct radeon_device *rdev) |
186 | { |
188 | { |
187 | int r; |
189 | int r; |
188 | 190 | ||
189 | if (rdev->gart.table.vram.robj) { |
191 | if (rdev->gart.table.vram.robj) { |
190 | WARN(1, "RS600 GART already initialized.\n"); |
192 | WARN(1, "RS600 GART already initialized.\n"); |
191 | return 0; |
193 | return 0; |
192 | } |
194 | } |
193 | /* Initialize common gart structure */ |
195 | /* Initialize common gart structure */ |
194 | r = radeon_gart_init(rdev); |
196 | r = radeon_gart_init(rdev); |
195 | if (r) { |
197 | if (r) { |
196 | return r; |
198 | return r; |
197 | } |
199 | } |
198 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; |
200 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; |
199 | return radeon_gart_table_vram_alloc(rdev); |
201 | return radeon_gart_table_vram_alloc(rdev); |
200 | } |
202 | } |
201 | 203 | ||
202 | int rs600_gart_enable(struct radeon_device *rdev) |
204 | int rs600_gart_enable(struct radeon_device *rdev) |
203 | { |
205 | { |
204 | u32 tmp; |
206 | u32 tmp; |
205 | int r, i; |
207 | int r, i; |
206 | 208 | ||
207 | if (rdev->gart.table.vram.robj == NULL) { |
209 | if (rdev->gart.table.vram.robj == NULL) { |
208 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
210 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
209 | return -EINVAL; |
211 | return -EINVAL; |
210 | } |
212 | } |
211 | r = radeon_gart_table_vram_pin(rdev); |
213 | r = radeon_gart_table_vram_pin(rdev); |
212 | if (r) |
214 | if (r) |
213 | return r; |
215 | return r; |
214 | /* Enable bus master */ |
216 | /* Enable bus master */ |
215 | tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; |
217 | tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; |
216 | WREG32(R_00004C_BUS_CNTL, tmp); |
218 | WREG32(R_00004C_BUS_CNTL, tmp); |
217 | /* FIXME: setup default page */ |
219 | /* FIXME: setup default page */ |
218 | WREG32_MC(R_000100_MC_PT0_CNTL, |
220 | WREG32_MC(R_000100_MC_PT0_CNTL, |
219 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
221 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
220 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
222 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
221 | 223 | ||
222 | for (i = 0; i < 19; i++) { |
224 | for (i = 0; i < 19; i++) { |
223 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
225 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
224 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
226 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
225 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
227 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
226 | V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) | |
228 | V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) | |
227 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
229 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
228 | V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) | |
230 | V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) | |
229 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) | |
231 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) | |
230 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | |
232 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | |
231 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3)); |
233 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3)); |
232 | } |
234 | } |
233 | /* enable first context */ |
235 | /* enable first context */ |
234 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
236 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
235 | S_000102_ENABLE_PAGE_TABLE(1) | |
237 | S_000102_ENABLE_PAGE_TABLE(1) | |
236 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
238 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
237 | 239 | ||
238 | /* disable all other contexts */ |
240 | /* disable all other contexts */ |
239 | for (i = 1; i < 8; i++) |
241 | for (i = 1; i < 8; i++) |
240 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
242 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
241 | 243 | ||
242 | /* setup the page table */ |
244 | /* setup the page table */ |
243 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
245 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
244 | rdev->gart.table_addr); |
246 | rdev->gart.table_addr); |
245 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); |
247 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); |
246 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); |
248 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); |
247 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
249 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
248 | 250 | ||
249 | /* System context maps to VRAM space */ |
251 | /* System context maps to VRAM space */ |
250 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start); |
252 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start); |
251 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end); |
253 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end); |
252 | 254 | ||
253 | /* enable page tables */ |
255 | /* enable page tables */ |
254 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
256 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
255 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
257 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
256 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
258 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
257 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); |
259 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); |
258 | rs600_gart_tlb_flush(rdev); |
260 | rs600_gart_tlb_flush(rdev); |
259 | rdev->gart.ready = true; |
261 | rdev->gart.ready = true; |
260 | return 0; |
262 | return 0; |
261 | } |
263 | } |
262 | 264 | ||
263 | void rs600_gart_disable(struct radeon_device *rdev) |
265 | void rs600_gart_disable(struct radeon_device *rdev) |
264 | { |
266 | { |
265 | u32 tmp; |
267 | u32 tmp; |
266 | int r; |
268 | int r; |
267 | 269 | ||
268 | /* FIXME: disable out of gart access */ |
270 | /* FIXME: disable out of gart access */ |
269 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
271 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
270 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
272 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
271 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
273 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
272 | if (rdev->gart.table.vram.robj) { |
274 | if (rdev->gart.table.vram.robj) { |
273 | // radeon_object_kunmap(rdev->gart.table.vram.robj); |
275 | // radeon_object_kunmap(rdev->gart.table.vram.robj); |
274 | // radeon_object_unpin(rdev->gart.table.vram.robj); |
276 | // radeon_object_unpin(rdev->gart.table.vram.robj); |
275 | } |
277 | } |
276 | } |
278 | } |
277 | 279 | ||
/*
 * rs600_gart_fini - full GART teardown.
 *
 * Disables translation, releases the VRAM page table, then frees the
 * common GART bookkeeping.
 */
void rs600_gart_fini(struct radeon_device *rdev)
{
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
284 | 286 | ||
285 | #define R600_PTE_VALID (1 << 0) |
287 | #define R600_PTE_VALID (1 << 0) |
286 | #define R600_PTE_SYSTEM (1 << 1) |
288 | #define R600_PTE_SYSTEM (1 << 1) |
287 | #define R600_PTE_SNOOPED (1 << 2) |
289 | #define R600_PTE_SNOOPED (1 << 2) |
288 | #define R600_PTE_READABLE (1 << 5) |
290 | #define R600_PTE_READABLE (1 << 5) |
289 | #define R600_PTE_WRITEABLE (1 << 6) |
291 | #define R600_PTE_WRITEABLE (1 << 6) |
290 | 292 | ||
291 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
293 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
292 | { |
294 | { |
293 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
295 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
294 | 296 | ||
295 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
297 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
296 | return -EINVAL; |
298 | return -EINVAL; |
297 | } |
299 | } |
298 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
300 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
299 | addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
301 | addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
300 | addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; |
302 | addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; |
301 | writeq(addr, ((void __iomem *)ptr) + (i * 8)); |
303 | writeq(addr, ((void __iomem *)ptr) + (i * 8)); |
302 | return 0; |
304 | return 0; |
303 | } |
305 | } |
- | 306 | ||
/*
 * rs600_irq_set() is disabled in this port (interrupt delivery is not
 * wired up -- see the commented-out calls in rs600_hpd_init()). Kept
 * under #if 0 rather than a block comment so nested comments inside the
 * body cannot break it; re-enable together with the IRQ handler.
 */
#if 0
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}
#endif
335 | 344 | ||
336 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) |
345 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) |
337 | { |
346 | { |
338 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
347 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
339 | uint32_t irq_mask = ~C_000044_SW_INT; |
348 | uint32_t irq_mask = ~C_000044_SW_INT; |
340 | u32 tmp; |
349 | u32 tmp; |
341 | 350 | ||
342 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
351 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
343 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
352 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
344 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { |
353 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { |
345 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
354 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
346 | S_006534_D1MODE_VBLANK_ACK(1)); |
355 | S_006534_D1MODE_VBLANK_ACK(1)); |
347 | } |
356 | } |
348 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { |
357 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { |
349 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
358 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
350 | S_006D34_D2MODE_VBLANK_ACK(1)); |
359 | S_006D34_D2MODE_VBLANK_ACK(1)); |
351 | } |
360 | } |
352 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { |
361 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { |
353 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); |
362 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); |
354 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); |
363 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); |
355 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
364 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
356 | } |
365 | } |
357 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { |
366 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { |
358 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); |
367 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); |
359 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); |
368 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); |
360 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
369 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
361 | } |
370 | } |
362 | } else { |
371 | } else { |
363 | *r500_disp_int = 0; |
372 | *r500_disp_int = 0; |
364 | } |
373 | } |
365 | 374 | ||
366 | if (irqs) { |
375 | if (irqs) { |
367 | WREG32(R_000044_GEN_INT_STATUS, irqs); |
376 | WREG32(R_000044_GEN_INT_STATUS, irqs); |
368 | } |
377 | } |
369 | return irqs & irq_mask; |
378 | return irqs & irq_mask; |
370 | } |
379 | } |
371 | 380 | ||
372 | void rs600_irq_disable(struct radeon_device *rdev) |
381 | void rs600_irq_disable(struct radeon_device *rdev) |
373 | { |
382 | { |
374 | u32 tmp; |
383 | u32 tmp; |
375 | 384 | ||
376 | WREG32(R_000040_GEN_INT_CNTL, 0); |
385 | WREG32(R_000040_GEN_INT_CNTL, 0); |
377 | WREG32(R_006540_DxMODE_INT_MASK, 0); |
386 | WREG32(R_006540_DxMODE_INT_MASK, 0); |
378 | /* Wait and acknowledge irq */ |
387 | /* Wait and acknowledge irq */ |
379 | mdelay(1); |
388 | mdelay(1); |
380 | rs600_irq_ack(rdev, &tmp); |
389 | rs600_irq_ack(rdev, &tmp); |
381 | } |
390 | } |
382 | 391 | ||
383 | 392 | ||
384 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) |
393 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) |
385 | { |
394 | { |
386 | if (crtc == 0) |
395 | if (crtc == 0) |
387 | return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); |
396 | return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); |
388 | else |
397 | else |
389 | return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); |
398 | return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); |
390 | } |
399 | } |
391 | 400 | ||
392 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
401 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
393 | { |
402 | { |
394 | unsigned i; |
403 | unsigned i; |
395 | 404 | ||
396 | for (i = 0; i < rdev->usec_timeout; i++) { |
405 | for (i = 0; i < rdev->usec_timeout; i++) { |
397 | if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) |
406 | if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) |
398 | return 0; |
407 | return 0; |
399 | udelay(1); |
408 | udelay(1); |
400 | } |
409 | } |
401 | return -1; |
410 | return -1; |
402 | } |
411 | } |
403 | 412 | ||
404 | void rs600_gpu_init(struct radeon_device *rdev) |
413 | void rs600_gpu_init(struct radeon_device *rdev) |
405 | { |
414 | { |
406 | r100_hdp_reset(rdev); |
415 | r100_hdp_reset(rdev); |
407 | r420_pipes_init(rdev); |
416 | r420_pipes_init(rdev); |
408 | /* Wait for mc idle */ |
417 | /* Wait for mc idle */ |
409 | if (rs600_mc_wait_for_idle(rdev)) |
418 | if (rs600_mc_wait_for_idle(rdev)) |
410 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
419 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
411 | } |
420 | } |
412 | 421 | ||
413 | void rs600_vram_info(struct radeon_device *rdev) |
422 | void rs600_vram_info(struct radeon_device *rdev) |
414 | { |
423 | { |
415 | rdev->mc.vram_is_ddr = true; |
424 | rdev->mc.vram_is_ddr = true; |
416 | rdev->mc.vram_width = 128; |
425 | rdev->mc.vram_width = 128; |
417 | 426 | ||
418 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
427 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
419 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
428 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
420 | 429 | ||
421 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
430 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
422 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
431 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
423 | 432 | ||
424 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) |
433 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) |
425 | rdev->mc.mc_vram_size = rdev->mc.aper_size; |
434 | rdev->mc.mc_vram_size = rdev->mc.aper_size; |
426 | 435 | ||
427 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) |
436 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) |
428 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
437 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
429 | } |
438 | } |
430 | 439 | ||
/*
 * rs600_bandwidth_update - display watermark programming (stub).
 *
 * FIXME: implement, should this be like rs690 ?
 */
void rs600_bandwidth_update(struct radeon_device *rdev)
{
	/* intentionally empty -- see FIXME above */
}
435 | 444 | ||
436 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
445 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
437 | { |
446 | { |
438 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
447 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
439 | S_000070_MC_IND_CITF_ARB0(1)); |
448 | S_000070_MC_IND_CITF_ARB0(1)); |
440 | return RREG32(R_000074_MC_IND_DATA); |
449 | return RREG32(R_000074_MC_IND_DATA); |
441 | } |
450 | } |
442 | 451 | ||
443 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
452 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
444 | { |
453 | { |
445 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
454 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
446 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); |
455 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); |
447 | WREG32(R_000074_MC_IND_DATA, v); |
456 | WREG32(R_000074_MC_IND_DATA, v); |
448 | } |
457 | } |
449 | 458 | ||
/*
 * Register the RS600 debugfs entries (currently only the shared R100
 * RBBM file); failure is non-fatal and only logged.
 */
void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}
455 | 464 | ||
456 | void rs600_set_safe_registers(struct radeon_device *rdev) |
465 | void rs600_set_safe_registers(struct radeon_device *rdev) |
457 | { |
466 | { |
458 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; |
467 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; |
459 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); |
468 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); |
460 | } |
469 | } |
461 | 470 | ||
462 | static void rs600_mc_program(struct radeon_device *rdev) |
471 | static void rs600_mc_program(struct radeon_device *rdev) |
463 | { |
472 | { |
464 | struct rv515_mc_save save; |
473 | struct rv515_mc_save save; |
465 | 474 | ||
466 | /* Stops all mc clients */ |
475 | /* Stops all mc clients */ |
467 | rv515_mc_stop(rdev, &save); |
476 | rv515_mc_stop(rdev, &save); |
468 | 477 | ||
469 | /* Wait for mc idle */ |
478 | /* Wait for mc idle */ |
470 | if (rs600_mc_wait_for_idle(rdev)) |
479 | if (rs600_mc_wait_for_idle(rdev)) |
471 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
480 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
472 | 481 | ||
473 | /* FIXME: What does AGP means for such chipset ? */ |
482 | /* FIXME: What does AGP means for such chipset ? */ |
474 | WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); |
483 | WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); |
475 | WREG32_MC(R_000006_AGP_BASE, 0); |
484 | WREG32_MC(R_000006_AGP_BASE, 0); |
476 | WREG32_MC(R_000007_AGP_BASE_2, 0); |
485 | WREG32_MC(R_000007_AGP_BASE_2, 0); |
477 | /* Program MC */ |
486 | /* Program MC */ |
478 | WREG32_MC(R_000004_MC_FB_LOCATION, |
487 | WREG32_MC(R_000004_MC_FB_LOCATION, |
479 | S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | |
488 | S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | |
480 | S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
489 | S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
481 | WREG32(R_000134_HDP_FB_LOCATION, |
490 | WREG32(R_000134_HDP_FB_LOCATION, |
482 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); |
491 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); |
483 | 492 | ||
484 | rv515_mc_resume(rdev, &save); |
493 | rv515_mc_resume(rdev, &save); |
485 | } |
494 | } |
486 | 495 | ||
487 | static int rs600_startup(struct radeon_device *rdev) |
496 | static int rs600_startup(struct radeon_device *rdev) |
488 | { |
497 | { |
489 | int r; |
498 | int r; |
490 | 499 | ||
491 | rs600_mc_program(rdev); |
500 | rs600_mc_program(rdev); |
492 | /* Resume clock */ |
501 | /* Resume clock */ |
493 | rv515_clock_startup(rdev); |
502 | rv515_clock_startup(rdev); |
494 | /* Initialize GPU configuration (# pipes, ...) */ |
503 | /* Initialize GPU configuration (# pipes, ...) */ |
495 | rs600_gpu_init(rdev); |
504 | rs600_gpu_init(rdev); |
496 | /* Initialize GART (initialize after TTM so we can allocate |
505 | /* Initialize GART (initialize after TTM so we can allocate |
497 | * memory through TTM but finalize after TTM) */ |
506 | * memory through TTM but finalize after TTM) */ |
498 | r = rs600_gart_enable(rdev); |
507 | r = rs600_gart_enable(rdev); |
499 | if (r) |
508 | if (r) |
500 | return r; |
509 | return r; |
501 | /* Enable IRQ */ |
510 | /* Enable IRQ */ |
502 | // rs600_irq_set(rdev); |
511 | // rs600_irq_set(rdev); |
- | 512 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
|
503 | /* 1M ring buffer */ |
513 | /* 1M ring buffer */ |
504 | // r = r100_cp_init(rdev, 1024 * 1024); |
514 | // r = r100_cp_init(rdev, 1024 * 1024); |
505 | // if (r) { |
515 | // if (r) { |
506 | // dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
516 | // dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
507 | // return r; |
517 | // return r; |
508 | // } |
518 | // } |
509 | // r = r100_wb_init(rdev); |
519 | // r = r100_wb_init(rdev); |
510 | // if (r) |
520 | // if (r) |
511 | // dev_err(rdev->dev, "failled initializing WB (%d).\n", r); |
521 | // dev_err(rdev->dev, "failled initializing WB (%d).\n", r); |
512 | // r = r100_ib_init(rdev); |
522 | // r = r100_ib_init(rdev); |
513 | // if (r) { |
523 | // if (r) { |
514 | // dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
524 | // dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
515 | // return r; |
525 | // return r; |
516 | // } |
526 | // } |
517 | return 0; |
527 | return 0; |
518 | } |
528 | } |
519 | 529 | ||
520 | 530 | ||
521 | 531 | ||
522 | int rs600_init(struct radeon_device *rdev) |
532 | int rs600_init(struct radeon_device *rdev) |
523 | { |
533 | { |
524 | int r; |
534 | int r; |
525 | 535 | ||
526 | /* Disable VGA */ |
536 | /* Disable VGA */ |
527 | rv515_vga_render_disable(rdev); |
537 | rv515_vga_render_disable(rdev); |
528 | /* Initialize scratch registers */ |
538 | /* Initialize scratch registers */ |
529 | radeon_scratch_init(rdev); |
539 | radeon_scratch_init(rdev); |
530 | /* Initialize surface registers */ |
540 | /* Initialize surface registers */ |
531 | radeon_surface_init(rdev); |
541 | radeon_surface_init(rdev); |
532 | /* BIOS */ |
542 | /* BIOS */ |
533 | if (!radeon_get_bios(rdev)) { |
543 | if (!radeon_get_bios(rdev)) { |
534 | if (ASIC_IS_AVIVO(rdev)) |
544 | if (ASIC_IS_AVIVO(rdev)) |
535 | return -EINVAL; |
545 | return -EINVAL; |
536 | } |
546 | } |
537 | if (rdev->is_atom_bios) { |
547 | if (rdev->is_atom_bios) { |
538 | r = radeon_atombios_init(rdev); |
548 | r = radeon_atombios_init(rdev); |
539 | if (r) |
549 | if (r) |
540 | return r; |
550 | return r; |
541 | } else { |
551 | } else { |
542 | dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); |
552 | dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); |
543 | return -EINVAL; |
553 | return -EINVAL; |
544 | } |
554 | } |
545 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
555 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
546 | if (radeon_gpu_reset(rdev)) { |
556 | if (radeon_gpu_reset(rdev)) { |
547 | dev_warn(rdev->dev, |
557 | dev_warn(rdev->dev, |
548 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
558 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
549 | RREG32(R_000E40_RBBM_STATUS), |
559 | RREG32(R_000E40_RBBM_STATUS), |
550 | RREG32(R_0007C0_CP_STAT)); |
560 | RREG32(R_0007C0_CP_STAT)); |
551 | } |
561 | } |
552 | /* check if cards are posted or not */ |
562 | /* check if cards are posted or not */ |
553 | if (radeon_boot_test_post_card(rdev) == false) |
563 | if (radeon_boot_test_post_card(rdev) == false) |
554 | return -EINVAL; |
564 | return -EINVAL; |
555 | 565 | ||
556 | /* Initialize clocks */ |
566 | /* Initialize clocks */ |
557 | radeon_get_clock_info(rdev->ddev); |
567 | radeon_get_clock_info(rdev->ddev); |
558 | /* Initialize power management */ |
568 | /* Initialize power management */ |
559 | radeon_pm_init(rdev); |
569 | radeon_pm_init(rdev); |
560 | /* Get vram informations */ |
570 | /* Get vram informations */ |
561 | rs600_vram_info(rdev); |
571 | rs600_vram_info(rdev); |
562 | /* Initialize memory controller (also test AGP) */ |
572 | /* Initialize memory controller (also test AGP) */ |
563 | r = rs600_mc_init(rdev); |
573 | r = rs600_mc_init(rdev); |
564 | if (r) |
574 | if (r) |
565 | return r; |
575 | return r; |
566 | rs600_debugfs(rdev); |
576 | rs600_debugfs(rdev); |
567 | /* Fence driver */ |
577 | /* Fence driver */ |
568 | // r = radeon_fence_driver_init(rdev); |
578 | // r = radeon_fence_driver_init(rdev); |
569 | // if (r) |
579 | // if (r) |
570 | // return r; |
580 | // return r; |
571 | // r = radeon_irq_kms_init(rdev); |
581 | // r = radeon_irq_kms_init(rdev); |
572 | // if (r) |
582 | // if (r) |
573 | // return r; |
583 | // return r; |
574 | /* Memory manager */ |
584 | /* Memory manager */ |
575 | r = radeon_bo_init(rdev); |
585 | r = radeon_bo_init(rdev); |
576 | if (r) |
586 | if (r) |
577 | return r; |
587 | return r; |
578 | r = rs600_gart_init(rdev); |
588 | r = rs600_gart_init(rdev); |
579 | if (r) |
589 | if (r) |
580 | return r; |
590 | return r; |
581 | rs600_set_safe_registers(rdev); |
591 | rs600_set_safe_registers(rdev); |
582 | rdev->accel_working = true; |
592 | rdev->accel_working = true; |
583 | r = rs600_startup(rdev); |
593 | r = rs600_startup(rdev); |
584 | if (r) { |
594 | if (r) { |
585 | /* Somethings want wront with the accel init stop accel */ |
595 | /* Somethings want wront with the accel init stop accel */ |
586 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
596 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
587 | // rs600_suspend(rdev); |
597 | // rs600_suspend(rdev); |
588 | // r100_cp_fini(rdev); |
598 | // r100_cp_fini(rdev); |
589 | // r100_wb_fini(rdev); |
599 | // r100_wb_fini(rdev); |
590 | // r100_ib_fini(rdev); |
600 | // r100_ib_fini(rdev); |
591 | rs600_gart_fini(rdev); |
601 | rs600_gart_fini(rdev); |
592 | // radeon_irq_kms_fini(rdev); |
602 | // radeon_irq_kms_fini(rdev); |
593 | rdev->accel_working = false; |
603 | rdev->accel_working = false; |
594 | } |
604 | } |
595 | return 0; |
605 | return 0; |
596 | }>>><>><>><>><>><>>>><> |
606 | }>>><>><>><>><>><>>>><> |