Rev 2004 | Rev 2160 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2004 | Rev 2005 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
14 | * all copies or substantial portions of the Software. |
15 | * |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
23 | * |
24 | * Authors: Dave Airlie |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
25 | * Alex Deucher |
26 | * Jerome Glisse |
26 | * Jerome Glisse |
27 | */ |
27 | */ |
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"
38 | 38 | ||
39 | #define R700_PFP_UCODE_SIZE 848 |
39 | #define R700_PFP_UCODE_SIZE 848 |
40 | #define R700_PM4_UCODE_SIZE 1360 |
40 | #define R700_PM4_UCODE_SIZE 1360 |
41 | 41 | ||
42 | static void rv770_gpu_init(struct radeon_device *rdev); |
42 | static void rv770_gpu_init(struct radeon_device *rdev); |
43 | void rv770_fini(struct radeon_device *rdev); |
43 | void rv770_fini(struct radeon_device *rdev); |
44 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev); |
44 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev); |
45 | 45 | ||
46 | 46 | ||
47 | /* |
47 | /* |
48 | * GART |
48 | * GART |
49 | */ |
49 | */ |
/**
 * rv770_pcie_gart_enable - bring up the PCIE GART over the GTT aperture.
 * @rdev: radeon device
 *
 * Pins the page table (which lives in a VRAM buffer object), restores
 * its entries, programs the VM L2 cache and the MD/MB L1 TLBs, and
 * enables VM context 0 covering the GTT range.  Contexts 1-6 are
 * explicitly disabled.
 *
 * Returns 0 on success, -EINVAL if no page-table object exists, or the
 * error returned by radeon_gart_table_vram_pin().
 */
int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	/* The page table must have been allocated in VRAM beforehand. */
	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Re-write all cached GART entries into the (re)pinned table. */
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control: same value for all MD/MB L1 TLB banks. */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	/* Context 0 spans the whole GTT; addresses are in 4K pages. */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* Out-of-range accesses fault to the dummy page. */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	/* Disable VM contexts 1-6 (registers are 4 bytes apart). */
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
95 | 95 | ||
/**
 * rv770_pcie_gart_disable - disable the PCIE GART.
 * @rdev: radeon device
 *
 * Disables all seven VM contexts, reprograms the L2 cache and L1 TLBs
 * with the TLB itself disabled, then unmaps and unpins the page-table
 * buffer object if one exists.
 */
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache (note: ENABLE_L2_CACHE is deliberately absent) */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control: size fields only, ENABLE_L1_TLB not set. */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	/* Release the page-table BO; best effort if reservation fails. */
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
128 | 128 | ||
/**
 * rv770_pcie_gart_fini - final teardown of the PCIE GART.
 * @rdev: radeon device
 *
 * Tears down the generic GART bookkeeping, disables the hardware GART,
 * then frees the page-table VRAM buffer object.
 */
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv770_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
135 | 135 | ||
136 | 136 | ||
/**
 * rv770_agp_enable - configure the VM block for AGP operation.
 * @rdev: radeon device
 *
 * Programs the same L2 cache and L1 TLB configuration as the PCIE GART
 * path, but disables all VM contexts (0-6) since AGP does not use the
 * GPU page tables.
 */
void rv770_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control: same value for all MD/MB L1 TLB banks. */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	/* No page tables with AGP: disable VM contexts 0-6. */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
163 | 163 | ||
/**
 * rv770_mc_program - program the memory controller apertures.
 * @rdev: radeon device
 *
 * Initializes the HDP tiling surface registers, stops the MC (saving
 * display state), programs the system aperture, framebuffer location,
 * HDP non-surface range and AGP aperture, then resumes the MC and
 * disables the VGA renderer so it cannot scribble over driver-owned
 * VRAM.
 */
static void rv770_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP: zero the 32 tiling surface register sets
	 * (5 registers each, 0x18 bytes apart). */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
	tmp = RREG32(HDP_DEBUG1);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration: system aperture must cover both VRAM and
	 * the AGP/GTT range, so order low/high by which comes first. */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	/* FB location: top 16 bits = end, bottom 16 bits = start,
	 * both in 16MB units. */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		/* No AGP: program an empty aperture (BOT > TOP). */
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
234 | 234 | ||
235 | 235 | ||
236 | /* |
236 | /* |
237 | * CP. |
237 | * CP. |
238 | */ |
238 | */ |
/**
 * r700_cp_stop - halt the command processor.
 * @rdev: radeon device
 *
 * Halts both the ME and PFP micro-engines and clears the scratch
 * register write-back mask.
 */
void r700_cp_stop(struct radeon_device *rdev)
{
//   radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
	WREG32(SCRATCH_UMSK, 0);
}
245 | 245 | ||
/**
 * rv770_cp_load_microcode - upload PFP and ME microcode to the CP.
 * @rdev: radeon device
 *
 * Stops the CP, soft-resets it, then writes the big-endian firmware
 * images word by word into the PFP ucode and ME RAM windows via their
 * auto-incrementing address/data register pairs.
 *
 * Returns 0 on success or -EINVAL if either firmware image is missing.
 */
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);	/* read back to post the write */
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Upload PFP microcode; data register auto-increments the address. */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* Upload ME (PM4) microcode. */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	/* Rewind all address pointers. */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
283 | 283 | ||
284 | 284 | ||
285 | /* |
285 | /* |
286 | * Core functions |
286 | * Core functions |
287 | */ |
287 | */ |
288 | static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, |
288 | static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, |
289 | u32 num_tile_pipes, |
289 | u32 num_tile_pipes, |
290 | u32 num_backends, |
290 | u32 num_backends, |
291 | u32 backend_disable_mask) |
291 | u32 backend_disable_mask) |
292 | { |
292 | { |
293 | u32 backend_map = 0; |
293 | u32 backend_map = 0; |
294 | u32 enabled_backends_mask; |
294 | u32 enabled_backends_mask; |
295 | u32 enabled_backends_count; |
295 | u32 enabled_backends_count; |
296 | u32 cur_pipe; |
296 | u32 cur_pipe; |
297 | u32 swizzle_pipe[R7XX_MAX_PIPES]; |
297 | u32 swizzle_pipe[R7XX_MAX_PIPES]; |
298 | u32 cur_backend; |
298 | u32 cur_backend; |
299 | u32 i; |
299 | u32 i; |
300 | bool force_no_swizzle; |
300 | bool force_no_swizzle; |
301 | 301 | ||
302 | if (num_tile_pipes > R7XX_MAX_PIPES) |
302 | if (num_tile_pipes > R7XX_MAX_PIPES) |
303 | num_tile_pipes = R7XX_MAX_PIPES; |
303 | num_tile_pipes = R7XX_MAX_PIPES; |
304 | if (num_tile_pipes < 1) |
304 | if (num_tile_pipes < 1) |
305 | num_tile_pipes = 1; |
305 | num_tile_pipes = 1; |
306 | if (num_backends > R7XX_MAX_BACKENDS) |
306 | if (num_backends > R7XX_MAX_BACKENDS) |
307 | num_backends = R7XX_MAX_BACKENDS; |
307 | num_backends = R7XX_MAX_BACKENDS; |
308 | if (num_backends < 1) |
308 | if (num_backends < 1) |
309 | num_backends = 1; |
309 | num_backends = 1; |
310 | 310 | ||
311 | enabled_backends_mask = 0; |
311 | enabled_backends_mask = 0; |
312 | enabled_backends_count = 0; |
312 | enabled_backends_count = 0; |
313 | for (i = 0; i < R7XX_MAX_BACKENDS; ++i) { |
313 | for (i = 0; i < R7XX_MAX_BACKENDS; ++i) { |
314 | if (((backend_disable_mask >> i) & 1) == 0) { |
314 | if (((backend_disable_mask >> i) & 1) == 0) { |
315 | enabled_backends_mask |= (1 << i); |
315 | enabled_backends_mask |= (1 << i); |
316 | ++enabled_backends_count; |
316 | ++enabled_backends_count; |
317 | } |
317 | } |
318 | if (enabled_backends_count == num_backends) |
318 | if (enabled_backends_count == num_backends) |
319 | break; |
319 | break; |
320 | } |
320 | } |
321 | 321 | ||
322 | if (enabled_backends_count == 0) { |
322 | if (enabled_backends_count == 0) { |
323 | enabled_backends_mask = 1; |
323 | enabled_backends_mask = 1; |
324 | enabled_backends_count = 1; |
324 | enabled_backends_count = 1; |
325 | } |
325 | } |
326 | 326 | ||
327 | if (enabled_backends_count != num_backends) |
327 | if (enabled_backends_count != num_backends) |
328 | num_backends = enabled_backends_count; |
328 | num_backends = enabled_backends_count; |
329 | 329 | ||
330 | switch (rdev->family) { |
330 | switch (rdev->family) { |
331 | case CHIP_RV770: |
331 | case CHIP_RV770: |
332 | case CHIP_RV730: |
332 | case CHIP_RV730: |
333 | force_no_swizzle = false; |
333 | force_no_swizzle = false; |
334 | break; |
334 | break; |
335 | case CHIP_RV710: |
335 | case CHIP_RV710: |
336 | case CHIP_RV740: |
336 | case CHIP_RV740: |
337 | default: |
337 | default: |
338 | force_no_swizzle = true; |
338 | force_no_swizzle = true; |
339 | break; |
339 | break; |
340 | } |
340 | } |
341 | 341 | ||
342 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); |
342 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); |
343 | switch (num_tile_pipes) { |
343 | switch (num_tile_pipes) { |
344 | case 1: |
344 | case 1: |
345 | swizzle_pipe[0] = 0; |
345 | swizzle_pipe[0] = 0; |
346 | break; |
346 | break; |
347 | case 2: |
347 | case 2: |
348 | swizzle_pipe[0] = 0; |
348 | swizzle_pipe[0] = 0; |
349 | swizzle_pipe[1] = 1; |
349 | swizzle_pipe[1] = 1; |
350 | break; |
350 | break; |
351 | case 3: |
351 | case 3: |
352 | if (force_no_swizzle) { |
352 | if (force_no_swizzle) { |
353 | swizzle_pipe[0] = 0; |
353 | swizzle_pipe[0] = 0; |
354 | swizzle_pipe[1] = 1; |
354 | swizzle_pipe[1] = 1; |
355 | swizzle_pipe[2] = 2; |
355 | swizzle_pipe[2] = 2; |
356 | } else { |
356 | } else { |
357 | swizzle_pipe[0] = 0; |
357 | swizzle_pipe[0] = 0; |
358 | swizzle_pipe[1] = 2; |
358 | swizzle_pipe[1] = 2; |
359 | swizzle_pipe[2] = 1; |
359 | swizzle_pipe[2] = 1; |
360 | } |
360 | } |
361 | break; |
361 | break; |
362 | case 4: |
362 | case 4: |
363 | if (force_no_swizzle) { |
363 | if (force_no_swizzle) { |
364 | swizzle_pipe[0] = 0; |
364 | swizzle_pipe[0] = 0; |
365 | swizzle_pipe[1] = 1; |
365 | swizzle_pipe[1] = 1; |
366 | swizzle_pipe[2] = 2; |
366 | swizzle_pipe[2] = 2; |
367 | swizzle_pipe[3] = 3; |
367 | swizzle_pipe[3] = 3; |
368 | } else { |
368 | } else { |
369 | swizzle_pipe[0] = 0; |
369 | swizzle_pipe[0] = 0; |
370 | swizzle_pipe[1] = 2; |
370 | swizzle_pipe[1] = 2; |
371 | swizzle_pipe[2] = 3; |
371 | swizzle_pipe[2] = 3; |
372 | swizzle_pipe[3] = 1; |
372 | swizzle_pipe[3] = 1; |
373 | } |
373 | } |
374 | break; |
374 | break; |
375 | case 5: |
375 | case 5: |
376 | if (force_no_swizzle) { |
376 | if (force_no_swizzle) { |
377 | swizzle_pipe[0] = 0; |
377 | swizzle_pipe[0] = 0; |
378 | swizzle_pipe[1] = 1; |
378 | swizzle_pipe[1] = 1; |
379 | swizzle_pipe[2] = 2; |
379 | swizzle_pipe[2] = 2; |
380 | swizzle_pipe[3] = 3; |
380 | swizzle_pipe[3] = 3; |
381 | swizzle_pipe[4] = 4; |
381 | swizzle_pipe[4] = 4; |
382 | } else { |
382 | } else { |
383 | swizzle_pipe[0] = 0; |
383 | swizzle_pipe[0] = 0; |
384 | swizzle_pipe[1] = 2; |
384 | swizzle_pipe[1] = 2; |
385 | swizzle_pipe[2] = 4; |
385 | swizzle_pipe[2] = 4; |
386 | swizzle_pipe[3] = 1; |
386 | swizzle_pipe[3] = 1; |
387 | swizzle_pipe[4] = 3; |
387 | swizzle_pipe[4] = 3; |
388 | } |
388 | } |
389 | break; |
389 | break; |
390 | case 6: |
390 | case 6: |
391 | if (force_no_swizzle) { |
391 | if (force_no_swizzle) { |
392 | swizzle_pipe[0] = 0; |
392 | swizzle_pipe[0] = 0; |
393 | swizzle_pipe[1] = 1; |
393 | swizzle_pipe[1] = 1; |
394 | swizzle_pipe[2] = 2; |
394 | swizzle_pipe[2] = 2; |
395 | swizzle_pipe[3] = 3; |
395 | swizzle_pipe[3] = 3; |
396 | swizzle_pipe[4] = 4; |
396 | swizzle_pipe[4] = 4; |
397 | swizzle_pipe[5] = 5; |
397 | swizzle_pipe[5] = 5; |
398 | } else { |
398 | } else { |
399 | swizzle_pipe[0] = 0; |
399 | swizzle_pipe[0] = 0; |
400 | swizzle_pipe[1] = 2; |
400 | swizzle_pipe[1] = 2; |
401 | swizzle_pipe[2] = 4; |
401 | swizzle_pipe[2] = 4; |
402 | swizzle_pipe[3] = 5; |
402 | swizzle_pipe[3] = 5; |
403 | swizzle_pipe[4] = 3; |
403 | swizzle_pipe[4] = 3; |
404 | swizzle_pipe[5] = 1; |
404 | swizzle_pipe[5] = 1; |
405 | } |
405 | } |
406 | break; |
406 | break; |
407 | case 7: |
407 | case 7: |
408 | if (force_no_swizzle) { |
408 | if (force_no_swizzle) { |
409 | swizzle_pipe[0] = 0; |
409 | swizzle_pipe[0] = 0; |
410 | swizzle_pipe[1] = 1; |
410 | swizzle_pipe[1] = 1; |
411 | swizzle_pipe[2] = 2; |
411 | swizzle_pipe[2] = 2; |
412 | swizzle_pipe[3] = 3; |
412 | swizzle_pipe[3] = 3; |
413 | swizzle_pipe[4] = 4; |
413 | swizzle_pipe[4] = 4; |
414 | swizzle_pipe[5] = 5; |
414 | swizzle_pipe[5] = 5; |
415 | swizzle_pipe[6] = 6; |
415 | swizzle_pipe[6] = 6; |
416 | } else { |
416 | } else { |
417 | swizzle_pipe[0] = 0; |
417 | swizzle_pipe[0] = 0; |
418 | swizzle_pipe[1] = 2; |
418 | swizzle_pipe[1] = 2; |
419 | swizzle_pipe[2] = 4; |
419 | swizzle_pipe[2] = 4; |
420 | swizzle_pipe[3] = 6; |
420 | swizzle_pipe[3] = 6; |
421 | swizzle_pipe[4] = 3; |
421 | swizzle_pipe[4] = 3; |
422 | swizzle_pipe[5] = 1; |
422 | swizzle_pipe[5] = 1; |
423 | swizzle_pipe[6] = 5; |
423 | swizzle_pipe[6] = 5; |
424 | } |
424 | } |
425 | break; |
425 | break; |
426 | case 8: |
426 | case 8: |
427 | if (force_no_swizzle) { |
427 | if (force_no_swizzle) { |
428 | swizzle_pipe[0] = 0; |
428 | swizzle_pipe[0] = 0; |
429 | swizzle_pipe[1] = 1; |
429 | swizzle_pipe[1] = 1; |
430 | swizzle_pipe[2] = 2; |
430 | swizzle_pipe[2] = 2; |
431 | swizzle_pipe[3] = 3; |
431 | swizzle_pipe[3] = 3; |
432 | swizzle_pipe[4] = 4; |
432 | swizzle_pipe[4] = 4; |
433 | swizzle_pipe[5] = 5; |
433 | swizzle_pipe[5] = 5; |
434 | swizzle_pipe[6] = 6; |
434 | swizzle_pipe[6] = 6; |
435 | swizzle_pipe[7] = 7; |
435 | swizzle_pipe[7] = 7; |
436 | } else { |
436 | } else { |
437 | swizzle_pipe[0] = 0; |
437 | swizzle_pipe[0] = 0; |
438 | swizzle_pipe[1] = 2; |
438 | swizzle_pipe[1] = 2; |
439 | swizzle_pipe[2] = 4; |
439 | swizzle_pipe[2] = 4; |
440 | swizzle_pipe[3] = 6; |
440 | swizzle_pipe[3] = 6; |
441 | swizzle_pipe[4] = 3; |
441 | swizzle_pipe[4] = 3; |
442 | swizzle_pipe[5] = 1; |
442 | swizzle_pipe[5] = 1; |
443 | swizzle_pipe[6] = 7; |
443 | swizzle_pipe[6] = 7; |
444 | swizzle_pipe[7] = 5; |
444 | swizzle_pipe[7] = 5; |
445 | } |
445 | } |
446 | break; |
446 | break; |
447 | } |
447 | } |
448 | 448 | ||
449 | cur_backend = 0; |
449 | cur_backend = 0; |
450 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { |
450 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { |
451 | while (((1 << cur_backend) & enabled_backends_mask) == 0) |
451 | while (((1 << cur_backend) & enabled_backends_mask) == 0) |
452 | cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; |
452 | cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; |
453 | 453 | ||
454 | backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); |
454 | backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); |
455 | 455 | ||
456 | cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; |
456 | cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; |
457 | } |
457 | } |
458 | 458 | ||
459 | return backend_map; |
459 | return backend_map; |
460 | } |
460 | } |
461 | 461 | ||
462 | static void rv770_program_channel_remap(struct radeon_device *rdev) |
462 | static void rv770_program_channel_remap(struct radeon_device *rdev) |
463 | { |
463 | { |
464 | u32 tcp_chan_steer, mc_shared_chremap, tmp; |
464 | u32 tcp_chan_steer, mc_shared_chremap, tmp; |
465 | bool force_no_swizzle; |
465 | bool force_no_swizzle; |
466 | 466 | ||
467 | switch (rdev->family) { |
467 | switch (rdev->family) { |
468 | case CHIP_RV770: |
468 | case CHIP_RV770: |
469 | case CHIP_RV730: |
469 | case CHIP_RV730: |
470 | force_no_swizzle = false; |
470 | force_no_swizzle = false; |
471 | break; |
471 | break; |
472 | case CHIP_RV710: |
472 | case CHIP_RV710: |
473 | case CHIP_RV740: |
473 | case CHIP_RV740: |
474 | default: |
474 | default: |
475 | force_no_swizzle = true; |
475 | force_no_swizzle = true; |
476 | break; |
476 | break; |
477 | } |
477 | } |
478 | 478 | ||
479 | tmp = RREG32(MC_SHARED_CHMAP); |
479 | tmp = RREG32(MC_SHARED_CHMAP); |
480 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { |
480 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { |
481 | case 0: |
481 | case 0: |
482 | case 1: |
482 | case 1: |
483 | default: |
483 | default: |
484 | /* default mapping */ |
484 | /* default mapping */ |
485 | mc_shared_chremap = 0x00fac688; |
485 | mc_shared_chremap = 0x00fac688; |
486 | break; |
486 | break; |
487 | case 2: |
487 | case 2: |
488 | case 3: |
488 | case 3: |
489 | if (force_no_swizzle) |
489 | if (force_no_swizzle) |
490 | mc_shared_chremap = 0x00fac688; |
490 | mc_shared_chremap = 0x00fac688; |
491 | else |
491 | else |
492 | mc_shared_chremap = 0x00bbc298; |
492 | mc_shared_chremap = 0x00bbc298; |
493 | break; |
493 | break; |
494 | } |
494 | } |
495 | 495 | ||
496 | if (rdev->family == CHIP_RV740) |
496 | if (rdev->family == CHIP_RV740) |
497 | tcp_chan_steer = 0x00ef2a60; |
497 | tcp_chan_steer = 0x00ef2a60; |
498 | else |
498 | else |
499 | tcp_chan_steer = 0x00fac688; |
499 | tcp_chan_steer = 0x00fac688; |
500 | 500 | ||
501 | /* RV770 CE has special chremap setup */ |
501 | /* RV770 CE has special chremap setup */ |
502 | if (rdev->pdev->device == 0x944e) { |
502 | if (rdev->pdev->device == 0x944e) { |
503 | tcp_chan_steer = 0x00b08b08; |
503 | tcp_chan_steer = 0x00b08b08; |
504 | mc_shared_chremap = 0x00b08b08; |
504 | mc_shared_chremap = 0x00b08b08; |
505 | } |
505 | } |
506 | 506 | ||
507 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); |
507 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); |
508 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); |
508 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); |
509 | } |
509 | } |
510 | 510 | ||
511 | static void rv770_gpu_init(struct radeon_device *rdev) |
511 | static void rv770_gpu_init(struct radeon_device *rdev) |
512 | { |
512 | { |
513 | int i, j, num_qd_pipes; |
513 | int i, j, num_qd_pipes; |
514 | u32 ta_aux_cntl; |
514 | u32 ta_aux_cntl; |
515 | u32 sx_debug_1; |
515 | u32 sx_debug_1; |
516 | u32 smx_dc_ctl0; |
516 | u32 smx_dc_ctl0; |
517 | u32 db_debug3; |
517 | u32 db_debug3; |
518 | u32 num_gs_verts_per_thread; |
518 | u32 num_gs_verts_per_thread; |
519 | u32 vgt_gs_per_es; |
519 | u32 vgt_gs_per_es; |
520 | u32 gs_prim_buffer_depth = 0; |
520 | u32 gs_prim_buffer_depth = 0; |
521 | u32 sq_ms_fifo_sizes; |
521 | u32 sq_ms_fifo_sizes; |
522 | u32 sq_config; |
522 | u32 sq_config; |
523 | u32 sq_thread_resource_mgmt; |
523 | u32 sq_thread_resource_mgmt; |
524 | u32 hdp_host_path_cntl; |
524 | u32 hdp_host_path_cntl; |
525 | u32 sq_dyn_gpr_size_simd_ab_0; |
525 | u32 sq_dyn_gpr_size_simd_ab_0; |
526 | u32 backend_map; |
526 | u32 backend_map; |
527 | u32 gb_tiling_config = 0; |
527 | u32 gb_tiling_config = 0; |
528 | u32 cc_rb_backend_disable = 0; |
528 | u32 cc_rb_backend_disable = 0; |
529 | u32 cc_gc_shader_pipe_config = 0; |
529 | u32 cc_gc_shader_pipe_config = 0; |
530 | u32 mc_arb_ramcfg; |
530 | u32 mc_arb_ramcfg; |
531 | u32 db_debug4; |
531 | u32 db_debug4; |
532 | 532 | ||
533 | /* setup chip specs */ |
533 | /* setup chip specs */ |
534 | switch (rdev->family) { |
534 | switch (rdev->family) { |
535 | case CHIP_RV770: |
535 | case CHIP_RV770: |
536 | rdev->config.rv770.max_pipes = 4; |
536 | rdev->config.rv770.max_pipes = 4; |
537 | rdev->config.rv770.max_tile_pipes = 8; |
537 | rdev->config.rv770.max_tile_pipes = 8; |
538 | rdev->config.rv770.max_simds = 10; |
538 | rdev->config.rv770.max_simds = 10; |
539 | rdev->config.rv770.max_backends = 4; |
539 | rdev->config.rv770.max_backends = 4; |
540 | rdev->config.rv770.max_gprs = 256; |
540 | rdev->config.rv770.max_gprs = 256; |
541 | rdev->config.rv770.max_threads = 248; |
541 | rdev->config.rv770.max_threads = 248; |
542 | rdev->config.rv770.max_stack_entries = 512; |
542 | rdev->config.rv770.max_stack_entries = 512; |
543 | rdev->config.rv770.max_hw_contexts = 8; |
543 | rdev->config.rv770.max_hw_contexts = 8; |
544 | rdev->config.rv770.max_gs_threads = 16 * 2; |
544 | rdev->config.rv770.max_gs_threads = 16 * 2; |
545 | rdev->config.rv770.sx_max_export_size = 128; |
545 | rdev->config.rv770.sx_max_export_size = 128; |
546 | rdev->config.rv770.sx_max_export_pos_size = 16; |
546 | rdev->config.rv770.sx_max_export_pos_size = 16; |
547 | rdev->config.rv770.sx_max_export_smx_size = 112; |
547 | rdev->config.rv770.sx_max_export_smx_size = 112; |
548 | rdev->config.rv770.sq_num_cf_insts = 2; |
548 | rdev->config.rv770.sq_num_cf_insts = 2; |
549 | 549 | ||
550 | rdev->config.rv770.sx_num_of_sets = 7; |
550 | rdev->config.rv770.sx_num_of_sets = 7; |
551 | rdev->config.rv770.sc_prim_fifo_size = 0xF9; |
551 | rdev->config.rv770.sc_prim_fifo_size = 0xF9; |
552 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
552 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
553 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
553 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
554 | break; |
554 | break; |
555 | case CHIP_RV730: |
555 | case CHIP_RV730: |
556 | rdev->config.rv770.max_pipes = 2; |
556 | rdev->config.rv770.max_pipes = 2; |
557 | rdev->config.rv770.max_tile_pipes = 4; |
557 | rdev->config.rv770.max_tile_pipes = 4; |
558 | rdev->config.rv770.max_simds = 8; |
558 | rdev->config.rv770.max_simds = 8; |
559 | rdev->config.rv770.max_backends = 2; |
559 | rdev->config.rv770.max_backends = 2; |
560 | rdev->config.rv770.max_gprs = 128; |
560 | rdev->config.rv770.max_gprs = 128; |
561 | rdev->config.rv770.max_threads = 248; |
561 | rdev->config.rv770.max_threads = 248; |
562 | rdev->config.rv770.max_stack_entries = 256; |
562 | rdev->config.rv770.max_stack_entries = 256; |
563 | rdev->config.rv770.max_hw_contexts = 8; |
563 | rdev->config.rv770.max_hw_contexts = 8; |
564 | rdev->config.rv770.max_gs_threads = 16 * 2; |
564 | rdev->config.rv770.max_gs_threads = 16 * 2; |
565 | rdev->config.rv770.sx_max_export_size = 256; |
565 | rdev->config.rv770.sx_max_export_size = 256; |
566 | rdev->config.rv770.sx_max_export_pos_size = 32; |
566 | rdev->config.rv770.sx_max_export_pos_size = 32; |
567 | rdev->config.rv770.sx_max_export_smx_size = 224; |
567 | rdev->config.rv770.sx_max_export_smx_size = 224; |
568 | rdev->config.rv770.sq_num_cf_insts = 2; |
568 | rdev->config.rv770.sq_num_cf_insts = 2; |
569 | 569 | ||
570 | rdev->config.rv770.sx_num_of_sets = 7; |
570 | rdev->config.rv770.sx_num_of_sets = 7; |
571 | rdev->config.rv770.sc_prim_fifo_size = 0xf9; |
571 | rdev->config.rv770.sc_prim_fifo_size = 0xf9; |
572 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
572 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
573 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
573 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
574 | if (rdev->config.rv770.sx_max_export_pos_size > 16) { |
574 | if (rdev->config.rv770.sx_max_export_pos_size > 16) { |
575 | rdev->config.rv770.sx_max_export_pos_size -= 16; |
575 | rdev->config.rv770.sx_max_export_pos_size -= 16; |
576 | rdev->config.rv770.sx_max_export_smx_size += 16; |
576 | rdev->config.rv770.sx_max_export_smx_size += 16; |
577 | } |
577 | } |
578 | break; |
578 | break; |
579 | case CHIP_RV710: |
579 | case CHIP_RV710: |
580 | rdev->config.rv770.max_pipes = 2; |
580 | rdev->config.rv770.max_pipes = 2; |
581 | rdev->config.rv770.max_tile_pipes = 2; |
581 | rdev->config.rv770.max_tile_pipes = 2; |
582 | rdev->config.rv770.max_simds = 2; |
582 | rdev->config.rv770.max_simds = 2; |
583 | rdev->config.rv770.max_backends = 1; |
583 | rdev->config.rv770.max_backends = 1; |
584 | rdev->config.rv770.max_gprs = 256; |
584 | rdev->config.rv770.max_gprs = 256; |
585 | rdev->config.rv770.max_threads = 192; |
585 | rdev->config.rv770.max_threads = 192; |
586 | rdev->config.rv770.max_stack_entries = 256; |
586 | rdev->config.rv770.max_stack_entries = 256; |
587 | rdev->config.rv770.max_hw_contexts = 4; |
587 | rdev->config.rv770.max_hw_contexts = 4; |
588 | rdev->config.rv770.max_gs_threads = 8 * 2; |
588 | rdev->config.rv770.max_gs_threads = 8 * 2; |
589 | rdev->config.rv770.sx_max_export_size = 128; |
589 | rdev->config.rv770.sx_max_export_size = 128; |
590 | rdev->config.rv770.sx_max_export_pos_size = 16; |
590 | rdev->config.rv770.sx_max_export_pos_size = 16; |
591 | rdev->config.rv770.sx_max_export_smx_size = 112; |
591 | rdev->config.rv770.sx_max_export_smx_size = 112; |
592 | rdev->config.rv770.sq_num_cf_insts = 1; |
592 | rdev->config.rv770.sq_num_cf_insts = 1; |
593 | 593 | ||
594 | rdev->config.rv770.sx_num_of_sets = 7; |
594 | rdev->config.rv770.sx_num_of_sets = 7; |
595 | rdev->config.rv770.sc_prim_fifo_size = 0x40; |
595 | rdev->config.rv770.sc_prim_fifo_size = 0x40; |
596 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
596 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
597 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
597 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
598 | break; |
598 | break; |
599 | case CHIP_RV740: |
599 | case CHIP_RV740: |
600 | rdev->config.rv770.max_pipes = 4; |
600 | rdev->config.rv770.max_pipes = 4; |
601 | rdev->config.rv770.max_tile_pipes = 4; |
601 | rdev->config.rv770.max_tile_pipes = 4; |
602 | rdev->config.rv770.max_simds = 8; |
602 | rdev->config.rv770.max_simds = 8; |
603 | rdev->config.rv770.max_backends = 4; |
603 | rdev->config.rv770.max_backends = 4; |
604 | rdev->config.rv770.max_gprs = 256; |
604 | rdev->config.rv770.max_gprs = 256; |
605 | rdev->config.rv770.max_threads = 248; |
605 | rdev->config.rv770.max_threads = 248; |
606 | rdev->config.rv770.max_stack_entries = 512; |
606 | rdev->config.rv770.max_stack_entries = 512; |
607 | rdev->config.rv770.max_hw_contexts = 8; |
607 | rdev->config.rv770.max_hw_contexts = 8; |
608 | rdev->config.rv770.max_gs_threads = 16 * 2; |
608 | rdev->config.rv770.max_gs_threads = 16 * 2; |
609 | rdev->config.rv770.sx_max_export_size = 256; |
609 | rdev->config.rv770.sx_max_export_size = 256; |
610 | rdev->config.rv770.sx_max_export_pos_size = 32; |
610 | rdev->config.rv770.sx_max_export_pos_size = 32; |
611 | rdev->config.rv770.sx_max_export_smx_size = 224; |
611 | rdev->config.rv770.sx_max_export_smx_size = 224; |
612 | rdev->config.rv770.sq_num_cf_insts = 2; |
612 | rdev->config.rv770.sq_num_cf_insts = 2; |
613 | 613 | ||
614 | rdev->config.rv770.sx_num_of_sets = 7; |
614 | rdev->config.rv770.sx_num_of_sets = 7; |
615 | rdev->config.rv770.sc_prim_fifo_size = 0x100; |
615 | rdev->config.rv770.sc_prim_fifo_size = 0x100; |
616 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
616 | rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30; |
617 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
617 | rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130; |
618 | 618 | ||
619 | if (rdev->config.rv770.sx_max_export_pos_size > 16) { |
619 | if (rdev->config.rv770.sx_max_export_pos_size > 16) { |
620 | rdev->config.rv770.sx_max_export_pos_size -= 16; |
620 | rdev->config.rv770.sx_max_export_pos_size -= 16; |
621 | rdev->config.rv770.sx_max_export_smx_size += 16; |
621 | rdev->config.rv770.sx_max_export_smx_size += 16; |
622 | } |
622 | } |
623 | break; |
623 | break; |
624 | default: |
624 | default: |
625 | break; |
625 | break; |
626 | } |
626 | } |
627 | 627 | ||
628 | /* Initialize HDP */ |
628 | /* Initialize HDP */ |
629 | j = 0; |
629 | j = 0; |
630 | for (i = 0; i < 32; i++) { |
630 | for (i = 0; i < 32; i++) { |
631 | WREG32((0x2c14 + j), 0x00000000); |
631 | WREG32((0x2c14 + j), 0x00000000); |
632 | WREG32((0x2c18 + j), 0x00000000); |
632 | WREG32((0x2c18 + j), 0x00000000); |
633 | WREG32((0x2c1c + j), 0x00000000); |
633 | WREG32((0x2c1c + j), 0x00000000); |
634 | WREG32((0x2c20 + j), 0x00000000); |
634 | WREG32((0x2c20 + j), 0x00000000); |
635 | WREG32((0x2c24 + j), 0x00000000); |
635 | WREG32((0x2c24 + j), 0x00000000); |
636 | j += 0x18; |
636 | j += 0x18; |
637 | } |
637 | } |
638 | 638 | ||
639 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
639 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
640 | 640 | ||
641 | /* setup tiling, simd, pipe config */ |
641 | /* setup tiling, simd, pipe config */ |
642 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
642 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
643 | 643 | ||
644 | switch (rdev->config.rv770.max_tile_pipes) { |
644 | switch (rdev->config.rv770.max_tile_pipes) { |
645 | case 1: |
645 | case 1: |
646 | default: |
646 | default: |
647 | gb_tiling_config |= PIPE_TILING(0); |
647 | gb_tiling_config |= PIPE_TILING(0); |
648 | break; |
648 | break; |
649 | case 2: |
649 | case 2: |
650 | gb_tiling_config |= PIPE_TILING(1); |
650 | gb_tiling_config |= PIPE_TILING(1); |
651 | break; |
651 | break; |
652 | case 4: |
652 | case 4: |
653 | gb_tiling_config |= PIPE_TILING(2); |
653 | gb_tiling_config |= PIPE_TILING(2); |
654 | break; |
654 | break; |
655 | case 8: |
655 | case 8: |
656 | gb_tiling_config |= PIPE_TILING(3); |
656 | gb_tiling_config |= PIPE_TILING(3); |
657 | break; |
657 | break; |
658 | } |
658 | } |
659 | rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; |
659 | rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; |
660 | 660 | ||
661 | if (rdev->family == CHIP_RV770) |
661 | if (rdev->family == CHIP_RV770) |
662 | gb_tiling_config |= BANK_TILING(1); |
662 | gb_tiling_config |= BANK_TILING(1); |
663 | else |
663 | else |
664 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
664 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
665 | rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); |
665 | rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); |
666 | gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); |
666 | gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); |
667 | if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) |
667 | if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) |
668 | rdev->config.rv770.tiling_group_size = 512; |
668 | rdev->config.rv770.tiling_group_size = 512; |
669 | else |
669 | else |
670 | rdev->config.rv770.tiling_group_size = 256; |
670 | rdev->config.rv770.tiling_group_size = 256; |
671 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { |
671 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { |
672 | gb_tiling_config |= ROW_TILING(3); |
672 | gb_tiling_config |= ROW_TILING(3); |
673 | gb_tiling_config |= SAMPLE_SPLIT(3); |
673 | gb_tiling_config |= SAMPLE_SPLIT(3); |
674 | } else { |
674 | } else { |
675 | gb_tiling_config |= |
675 | gb_tiling_config |= |
676 | ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT)); |
676 | ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT)); |
677 | gb_tiling_config |= |
677 | gb_tiling_config |= |
678 | SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT)); |
678 | SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT)); |
679 | } |
679 | } |
680 | 680 | ||
681 | gb_tiling_config |= BANK_SWAPS(1); |
681 | gb_tiling_config |= BANK_SWAPS(1); |
682 | 682 | ||
683 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; |
683 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; |
684 | cc_rb_backend_disable |= |
684 | cc_rb_backend_disable |= |
685 | BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); |
685 | BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); |
686 | 686 | ||
687 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; |
687 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; |
688 | cc_gc_shader_pipe_config |= |
688 | cc_gc_shader_pipe_config |= |
689 | INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); |
689 | INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); |
690 | cc_gc_shader_pipe_config |= |
690 | cc_gc_shader_pipe_config |= |
691 | INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); |
691 | INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); |
692 | 692 | ||
693 | if (rdev->family == CHIP_RV740) |
693 | if (rdev->family == CHIP_RV740) |
694 | backend_map = 0x28; |
694 | backend_map = 0x28; |
695 | else |
695 | else |
696 | backend_map = r700_get_tile_pipe_to_backend_map(rdev, |
696 | backend_map = r700_get_tile_pipe_to_backend_map(rdev, |
697 | rdev->config.rv770.max_tile_pipes, |
697 | rdev->config.rv770.max_tile_pipes, |
698 | (R7XX_MAX_BACKENDS - |
698 | (R7XX_MAX_BACKENDS - |
699 | r600_count_pipe_bits((cc_rb_backend_disable & |
699 | r600_count_pipe_bits((cc_rb_backend_disable & |
700 | R7XX_MAX_BACKENDS_MASK) >> 16)), |
700 | R7XX_MAX_BACKENDS_MASK) >> 16)), |
701 | (cc_rb_backend_disable >> 16)); |
701 | (cc_rb_backend_disable >> 16)); |
702 | 702 | ||
703 | rdev->config.rv770.tile_config = gb_tiling_config; |
703 | rdev->config.rv770.tile_config = gb_tiling_config; |
704 | gb_tiling_config |= BACKEND_MAP(backend_map); |
704 | gb_tiling_config |= BACKEND_MAP(backend_map); |
705 | 705 | ||
706 | WREG32(GB_TILING_CONFIG, gb_tiling_config); |
706 | WREG32(GB_TILING_CONFIG, gb_tiling_config); |
707 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
707 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
708 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
708 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
709 | 709 | ||
710 | rv770_program_channel_remap(rdev); |
710 | rv770_program_channel_remap(rdev); |
711 | 711 | ||
712 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
712 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
713 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
713 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
714 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
714 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
715 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
715 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
716 | 716 | ||
717 | WREG32(CGTS_SYS_TCC_DISABLE, 0); |
717 | WREG32(CGTS_SYS_TCC_DISABLE, 0); |
718 | WREG32(CGTS_TCC_DISABLE, 0); |
718 | WREG32(CGTS_TCC_DISABLE, 0); |
719 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); |
719 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); |
720 | WREG32(CGTS_USER_TCC_DISABLE, 0); |
720 | WREG32(CGTS_USER_TCC_DISABLE, 0); |
721 | 721 | ||
722 | num_qd_pipes = |
722 | num_qd_pipes = |
723 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
723 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
724 | WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); |
724 | WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); |
725 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); |
725 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); |
726 | 726 | ||
727 | /* set HW defaults for 3D engine */ |
727 | /* set HW defaults for 3D engine */ |
728 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | |
728 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | |
729 | ROQ_IB2_START(0x2b))); |
729 | ROQ_IB2_START(0x2b))); |
730 | 730 | ||
731 | WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); |
731 | WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); |
732 | 732 | ||
733 | ta_aux_cntl = RREG32(TA_CNTL_AUX); |
733 | ta_aux_cntl = RREG32(TA_CNTL_AUX); |
734 | WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO); |
734 | WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO); |
735 | 735 | ||
736 | sx_debug_1 = RREG32(SX_DEBUG_1); |
736 | sx_debug_1 = RREG32(SX_DEBUG_1); |
737 | sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; |
737 | sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; |
738 | WREG32(SX_DEBUG_1, sx_debug_1); |
738 | WREG32(SX_DEBUG_1, sx_debug_1); |
739 | 739 | ||
740 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); |
740 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); |
741 | smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff); |
741 | smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff); |
742 | smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); |
742 | smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); |
743 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); |
743 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); |
744 | 744 | ||
745 | if (rdev->family != CHIP_RV740) |
745 | if (rdev->family != CHIP_RV740) |
746 | WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | |
746 | WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | |
747 | GS_FLUSH_CTL(4) | |
747 | GS_FLUSH_CTL(4) | |
748 | ACK_FLUSH_CTL(3) | |
748 | ACK_FLUSH_CTL(3) | |
749 | SYNC_FLUSH_CTL)); |
749 | SYNC_FLUSH_CTL)); |
750 | 750 | ||
751 | db_debug3 = RREG32(DB_DEBUG3); |
751 | db_debug3 = RREG32(DB_DEBUG3); |
752 | db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); |
752 | db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); |
753 | switch (rdev->family) { |
753 | switch (rdev->family) { |
754 | case CHIP_RV770: |
754 | case CHIP_RV770: |
755 | case CHIP_RV740: |
755 | case CHIP_RV740: |
756 | db_debug3 |= DB_CLK_OFF_DELAY(0x1f); |
756 | db_debug3 |= DB_CLK_OFF_DELAY(0x1f); |
757 | break; |
757 | break; |
758 | case CHIP_RV710: |
758 | case CHIP_RV710: |
759 | case CHIP_RV730: |
759 | case CHIP_RV730: |
760 | default: |
760 | default: |
761 | db_debug3 |= DB_CLK_OFF_DELAY(2); |
761 | db_debug3 |= DB_CLK_OFF_DELAY(2); |
762 | break; |
762 | break; |
763 | } |
763 | } |
764 | WREG32(DB_DEBUG3, db_debug3); |
764 | WREG32(DB_DEBUG3, db_debug3); |
765 | 765 | ||
766 | if (rdev->family != CHIP_RV770) { |
766 | if (rdev->family != CHIP_RV770) { |
767 | db_debug4 = RREG32(DB_DEBUG4); |
767 | db_debug4 = RREG32(DB_DEBUG4); |
768 | db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; |
768 | db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; |
769 | WREG32(DB_DEBUG4, db_debug4); |
769 | WREG32(DB_DEBUG4, db_debug4); |
770 | } |
770 | } |
771 | 771 | ||
772 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) | |
772 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) | |
773 | POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) | |
773 | POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) | |
774 | SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1))); |
774 | SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1))); |
775 | 775 | ||
776 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) | |
776 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) | |
777 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) | |
777 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) | |
778 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize))); |
778 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize))); |
779 | 779 | ||
780 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); |
780 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); |
781 | 781 | ||
782 | WREG32(VGT_NUM_INSTANCES, 1); |
782 | WREG32(VGT_NUM_INSTANCES, 1); |
783 | 783 | ||
784 | WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); |
784 | WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); |
785 | 785 | ||
786 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); |
786 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); |
787 | 787 | ||
788 | WREG32(CP_PERFMON_CNTL, 0); |
788 | WREG32(CP_PERFMON_CNTL, 0); |
789 | 789 | ||
790 | sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) | |
790 | sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) | |
791 | DONE_FIFO_HIWATER(0xe0) | |
791 | DONE_FIFO_HIWATER(0xe0) | |
792 | ALU_UPDATE_FIFO_HIWATER(0x8)); |
792 | ALU_UPDATE_FIFO_HIWATER(0x8)); |
793 | switch (rdev->family) { |
793 | switch (rdev->family) { |
794 | case CHIP_RV770: |
794 | case CHIP_RV770: |
795 | case CHIP_RV730: |
795 | case CHIP_RV730: |
796 | case CHIP_RV710: |
796 | case CHIP_RV710: |
797 | sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1); |
797 | sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1); |
798 | break; |
798 | break; |
799 | case CHIP_RV740: |
799 | case CHIP_RV740: |
800 | default: |
800 | default: |
801 | sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); |
801 | sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); |
802 | break; |
802 | break; |
803 | } |
803 | } |
804 | WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes); |
804 | WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes); |
805 | 805 | ||
806 | /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT |
806 | /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT |
807 | * should be adjusted as needed by the 2D/3D drivers. This just sets default values |
807 | * should be adjusted as needed by the 2D/3D drivers. This just sets default values |
808 | */ |
808 | */ |
809 | sq_config = RREG32(SQ_CONFIG); |
809 | sq_config = RREG32(SQ_CONFIG); |
810 | sq_config &= ~(PS_PRIO(3) | |
810 | sq_config &= ~(PS_PRIO(3) | |
811 | VS_PRIO(3) | |
811 | VS_PRIO(3) | |
812 | GS_PRIO(3) | |
812 | GS_PRIO(3) | |
813 | ES_PRIO(3)); |
813 | ES_PRIO(3)); |
814 | sq_config |= (DX9_CONSTS | |
814 | sq_config |= (DX9_CONSTS | |
815 | VC_ENABLE | |
815 | VC_ENABLE | |
816 | EXPORT_SRC_C | |
816 | EXPORT_SRC_C | |
817 | PS_PRIO(0) | |
817 | PS_PRIO(0) | |
818 | VS_PRIO(1) | |
818 | VS_PRIO(1) | |
819 | GS_PRIO(2) | |
819 | GS_PRIO(2) | |
820 | ES_PRIO(3)); |
820 | ES_PRIO(3)); |
821 | if (rdev->family == CHIP_RV710) |
821 | if (rdev->family == CHIP_RV710) |
822 | /* no vertex cache */ |
822 | /* no vertex cache */ |
823 | sq_config &= ~VC_ENABLE; |
823 | sq_config &= ~VC_ENABLE; |
824 | 824 | ||
825 | WREG32(SQ_CONFIG, sq_config); |
825 | WREG32(SQ_CONFIG, sq_config); |
826 | 826 | ||
827 | WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | |
827 | WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | |
828 | NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | |
828 | NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | |
829 | NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2))); |
829 | NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2))); |
830 | 830 | ||
831 | WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) | |
831 | WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) | |
832 | NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64))); |
832 | NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64))); |
833 | 833 | ||
834 | sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) | |
834 | sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) | |
835 | NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) | |
835 | NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) | |
836 | NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8)); |
836 | NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8)); |
837 | if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads) |
837 | if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads) |
838 | sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads); |
838 | sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads); |
839 | else |
839 | else |
840 | sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8); |
840 | sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8); |
841 | WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); |
841 | WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); |
842 | 842 | ||
843 | WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) | |
843 | WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) | |
844 | NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4))); |
844 | NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4))); |
845 | 845 | ||
846 | WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) | |
846 | WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) | |
847 | NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4))); |
847 | NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4))); |
848 | 848 | ||
849 | sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) | |
849 | sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) | |
850 | SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) | |
850 | SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) | |
851 | SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) | |
851 | SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) | |
852 | SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64)); |
852 | SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64)); |
853 | 853 | ||
854 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0); |
854 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0); |
855 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0); |
855 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0); |
856 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0); |
856 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0); |
857 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0); |
857 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0); |
858 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0); |
858 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0); |
859 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0); |
859 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0); |
860 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0); |
860 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0); |
861 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0); |
861 | WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0); |
862 | 862 | ||
863 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | |
863 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | |
864 | FORCE_EOV_MAX_REZ_CNT(255))); |
864 | FORCE_EOV_MAX_REZ_CNT(255))); |
865 | 865 | ||
866 | if (rdev->family == CHIP_RV710) |
866 | if (rdev->family == CHIP_RV710) |
867 | WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) | |
867 | WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) | |
868 | AUTO_INVLD_EN(ES_AND_GS_AUTO))); |
868 | AUTO_INVLD_EN(ES_AND_GS_AUTO))); |
869 | else |
869 | else |
870 | WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) | |
870 | WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) | |
871 | AUTO_INVLD_EN(ES_AND_GS_AUTO))); |
871 | AUTO_INVLD_EN(ES_AND_GS_AUTO))); |
872 | 872 | ||
873 | switch (rdev->family) { |
873 | switch (rdev->family) { |
874 | case CHIP_RV770: |
874 | case CHIP_RV770: |
875 | case CHIP_RV730: |
875 | case CHIP_RV730: |
876 | case CHIP_RV740: |
876 | case CHIP_RV740: |
877 | gs_prim_buffer_depth = 384; |
877 | gs_prim_buffer_depth = 384; |
878 | break; |
878 | break; |
879 | case CHIP_RV710: |
879 | case CHIP_RV710: |
880 | gs_prim_buffer_depth = 128; |
880 | gs_prim_buffer_depth = 128; |
881 | break; |
881 | break; |
882 | default: |
882 | default: |
883 | break; |
883 | break; |
884 | } |
884 | } |
885 | 885 | ||
886 | num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16; |
886 | num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16; |
887 | vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread; |
887 | vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread; |
888 | /* Max value for this is 256 */ |
888 | /* Max value for this is 256 */ |
889 | if (vgt_gs_per_es > 256) |
889 | if (vgt_gs_per_es > 256) |
890 | vgt_gs_per_es = 256; |
890 | vgt_gs_per_es = 256; |
891 | 891 | ||
892 | WREG32(VGT_ES_PER_GS, 128); |
892 | WREG32(VGT_ES_PER_GS, 128); |
893 | WREG32(VGT_GS_PER_ES, vgt_gs_per_es); |
893 | WREG32(VGT_GS_PER_ES, vgt_gs_per_es); |
894 | WREG32(VGT_GS_PER_VS, 2); |
894 | WREG32(VGT_GS_PER_VS, 2); |
895 | 895 | ||
896 | /* more default values. 2D/3D driver should adjust as needed */ |
896 | /* more default values. 2D/3D driver should adjust as needed */ |
897 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
897 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
898 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
898 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
899 | WREG32(VGT_STRMOUT_EN, 0); |
899 | WREG32(VGT_STRMOUT_EN, 0); |
900 | WREG32(SX_MISC, 0); |
900 | WREG32(SX_MISC, 0); |
901 | WREG32(PA_SC_MODE_CNTL, 0); |
901 | WREG32(PA_SC_MODE_CNTL, 0); |
902 | WREG32(PA_SC_EDGERULE, 0xaaaaaaaa); |
902 | WREG32(PA_SC_EDGERULE, 0xaaaaaaaa); |
903 | WREG32(PA_SC_AA_CONFIG, 0); |
903 | WREG32(PA_SC_AA_CONFIG, 0); |
904 | WREG32(PA_SC_CLIPRECT_RULE, 0xffff); |
904 | WREG32(PA_SC_CLIPRECT_RULE, 0xffff); |
905 | WREG32(PA_SC_LINE_STIPPLE, 0); |
905 | WREG32(PA_SC_LINE_STIPPLE, 0); |
906 | WREG32(SPI_INPUT_Z, 0); |
906 | WREG32(SPI_INPUT_Z, 0); |
907 | WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); |
907 | WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); |
908 | WREG32(CB_COLOR7_FRAG, 0); |
908 | WREG32(CB_COLOR7_FRAG, 0); |
909 | 909 | ||
910 | /* clear render buffer base addresses */ |
910 | /* clear render buffer base addresses */ |
911 | WREG32(CB_COLOR0_BASE, 0); |
911 | WREG32(CB_COLOR0_BASE, 0); |
912 | WREG32(CB_COLOR1_BASE, 0); |
912 | WREG32(CB_COLOR1_BASE, 0); |
913 | WREG32(CB_COLOR2_BASE, 0); |
913 | WREG32(CB_COLOR2_BASE, 0); |
914 | WREG32(CB_COLOR3_BASE, 0); |
914 | WREG32(CB_COLOR3_BASE, 0); |
915 | WREG32(CB_COLOR4_BASE, 0); |
915 | WREG32(CB_COLOR4_BASE, 0); |
916 | WREG32(CB_COLOR5_BASE, 0); |
916 | WREG32(CB_COLOR5_BASE, 0); |
917 | WREG32(CB_COLOR6_BASE, 0); |
917 | WREG32(CB_COLOR6_BASE, 0); |
918 | WREG32(CB_COLOR7_BASE, 0); |
918 | WREG32(CB_COLOR7_BASE, 0); |
919 | 919 | ||
920 | WREG32(TCP_CNTL, 0); |
920 | WREG32(TCP_CNTL, 0); |
921 | 921 | ||
922 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
922 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
923 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); |
923 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); |
924 | 924 | ||
925 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); |
925 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); |
926 | 926 | ||
927 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | |
927 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | |
928 | NUM_CLIP_SEQ(3))); |
928 | NUM_CLIP_SEQ(3))); |
929 | 929 | ||
930 | } |
930 | } |
931 | 931 | ||
932 | static int rv770_vram_scratch_init(struct radeon_device *rdev) |
932 | static int rv770_vram_scratch_init(struct radeon_device *rdev) |
933 | { |
933 | { |
934 | int r; |
934 | int r; |
935 | u64 gpu_addr; |
935 | u64 gpu_addr; |
936 | 936 | ||
937 | if (rdev->vram_scratch.robj == NULL) { |
937 | if (rdev->vram_scratch.robj == NULL) { |
938 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
938 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
939 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
939 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
940 | &rdev->vram_scratch.robj); |
940 | &rdev->vram_scratch.robj); |
941 | if (r) { |
941 | if (r) { |
942 | return r; |
942 | return r; |
943 | } |
943 | } |
944 | } |
944 | } |
945 | 945 | ||
946 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); |
946 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); |
947 | if (unlikely(r != 0)) |
947 | if (unlikely(r != 0)) |
948 | return r; |
948 | return r; |
949 | r = radeon_bo_pin(rdev->vram_scratch.robj, |
949 | r = radeon_bo_pin(rdev->vram_scratch.robj, |
950 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
950 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
951 | if (r) { |
951 | if (r) { |
952 | radeon_bo_unreserve(rdev->vram_scratch.robj); |
952 | radeon_bo_unreserve(rdev->vram_scratch.robj); |
953 | return r; |
953 | return r; |
954 | } |
954 | } |
955 | r = radeon_bo_kmap(rdev->vram_scratch.robj, |
955 | r = radeon_bo_kmap(rdev->vram_scratch.robj, |
956 | (void **)&rdev->vram_scratch.ptr); |
956 | (void **)&rdev->vram_scratch.ptr); |
957 | if (r) |
957 | if (r) |
958 | radeon_bo_unpin(rdev->vram_scratch.robj); |
958 | radeon_bo_unpin(rdev->vram_scratch.robj); |
959 | radeon_bo_unreserve(rdev->vram_scratch.robj); |
959 | radeon_bo_unreserve(rdev->vram_scratch.robj); |
960 | 960 | ||
961 | return r; |
961 | return r; |
962 | } |
962 | } |
963 | 963 | ||
964 | static void rv770_vram_scratch_fini(struct radeon_device *rdev) |
964 | static void rv770_vram_scratch_fini(struct radeon_device *rdev) |
965 | { |
965 | { |
966 | int r; |
966 | int r; |
967 | 967 | ||
968 | if (rdev->vram_scratch.robj == NULL) { |
968 | if (rdev->vram_scratch.robj == NULL) { |
969 | return; |
969 | return; |
970 | } |
970 | } |
971 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); |
971 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); |
972 | if (likely(r == 0)) { |
972 | if (likely(r == 0)) { |
973 | radeon_bo_kunmap(rdev->vram_scratch.robj); |
973 | radeon_bo_kunmap(rdev->vram_scratch.robj); |
974 | radeon_bo_unpin(rdev->vram_scratch.robj); |
974 | radeon_bo_unpin(rdev->vram_scratch.robj); |
975 | radeon_bo_unreserve(rdev->vram_scratch.robj); |
975 | radeon_bo_unreserve(rdev->vram_scratch.robj); |
976 | } |
976 | } |
977 | radeon_bo_unref(&rdev->vram_scratch.robj); |
977 | radeon_bo_unref(&rdev->vram_scratch.robj); |
978 | } |
978 | } |
979 | 979 | ||
980 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
980 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
981 | { |
981 | { |
982 | u64 size_bf, size_af; |
982 | u64 size_bf, size_af; |
983 | 983 | ||
984 | if (mc->mc_vram_size > 0xE0000000) { |
984 | if (mc->mc_vram_size > 0xE0000000) { |
985 | /* leave room for at least 512M GTT */ |
985 | /* leave room for at least 512M GTT */ |
986 | dev_warn(rdev->dev, "limiting VRAM\n"); |
986 | dev_warn(rdev->dev, "limiting VRAM\n"); |
987 | mc->real_vram_size = 0xE0000000; |
987 | mc->real_vram_size = 0xE0000000; |
988 | mc->mc_vram_size = 0xE0000000; |
988 | mc->mc_vram_size = 0xE0000000; |
989 | } |
989 | } |
990 | if (rdev->flags & RADEON_IS_AGP) { |
990 | if (rdev->flags & RADEON_IS_AGP) { |
991 | size_bf = mc->gtt_start; |
991 | size_bf = mc->gtt_start; |
992 | size_af = 0xFFFFFFFF - mc->gtt_end + 1; |
992 | size_af = 0xFFFFFFFF - mc->gtt_end + 1; |
993 | if (size_bf > size_af) { |
993 | if (size_bf > size_af) { |
994 | if (mc->mc_vram_size > size_bf) { |
994 | if (mc->mc_vram_size > size_bf) { |
995 | dev_warn(rdev->dev, "limiting VRAM\n"); |
995 | dev_warn(rdev->dev, "limiting VRAM\n"); |
996 | mc->real_vram_size = size_bf; |
996 | mc->real_vram_size = size_bf; |
997 | mc->mc_vram_size = size_bf; |
997 | mc->mc_vram_size = size_bf; |
998 | } |
998 | } |
999 | mc->vram_start = mc->gtt_start - mc->mc_vram_size; |
999 | mc->vram_start = mc->gtt_start - mc->mc_vram_size; |
1000 | } else { |
1000 | } else { |
1001 | if (mc->mc_vram_size > size_af) { |
1001 | if (mc->mc_vram_size > size_af) { |
1002 | dev_warn(rdev->dev, "limiting VRAM\n"); |
1002 | dev_warn(rdev->dev, "limiting VRAM\n"); |
1003 | mc->real_vram_size = size_af; |
1003 | mc->real_vram_size = size_af; |
1004 | mc->mc_vram_size = size_af; |
1004 | mc->mc_vram_size = size_af; |
1005 | } |
1005 | } |
1006 | mc->vram_start = mc->gtt_end; |
1006 | mc->vram_start = mc->gtt_end; |
1007 | } |
1007 | } |
1008 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
1008 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
1009 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", |
1009 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", |
1010 | mc->mc_vram_size >> 20, mc->vram_start, |
1010 | mc->mc_vram_size >> 20, mc->vram_start, |
1011 | mc->vram_end, mc->real_vram_size >> 20); |
1011 | mc->vram_end, mc->real_vram_size >> 20); |
1012 | } else { |
1012 | } else { |
1013 | radeon_vram_location(rdev, &rdev->mc, 0); |
1013 | radeon_vram_location(rdev, &rdev->mc, 0); |
1014 | rdev->mc.gtt_base_align = 0; |
1014 | rdev->mc.gtt_base_align = 0; |
1015 | radeon_gtt_location(rdev, mc); |
1015 | radeon_gtt_location(rdev, mc); |
1016 | } |
1016 | } |
1017 | } |
1017 | } |
1018 | 1018 | ||
1019 | int rv770_mc_init(struct radeon_device *rdev) |
1019 | int rv770_mc_init(struct radeon_device *rdev) |
1020 | { |
1020 | { |
1021 | u32 tmp; |
1021 | u32 tmp; |
1022 | int chansize, numchan; |
1022 | int chansize, numchan; |
1023 | 1023 | ||
1024 | /* Get VRAM informations */ |
1024 | /* Get VRAM informations */ |
1025 | rdev->mc.vram_is_ddr = true; |
1025 | rdev->mc.vram_is_ddr = true; |
1026 | tmp = RREG32(MC_ARB_RAMCFG); |
1026 | tmp = RREG32(MC_ARB_RAMCFG); |
1027 | if (tmp & CHANSIZE_OVERRIDE) { |
1027 | if (tmp & CHANSIZE_OVERRIDE) { |
1028 | chansize = 16; |
1028 | chansize = 16; |
1029 | } else if (tmp & CHANSIZE_MASK) { |
1029 | } else if (tmp & CHANSIZE_MASK) { |
1030 | chansize = 64; |
1030 | chansize = 64; |
1031 | } else { |
1031 | } else { |
1032 | chansize = 32; |
1032 | chansize = 32; |
1033 | } |
1033 | } |
1034 | tmp = RREG32(MC_SHARED_CHMAP); |
1034 | tmp = RREG32(MC_SHARED_CHMAP); |
1035 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { |
1035 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { |
1036 | case 0: |
1036 | case 0: |
1037 | default: |
1037 | default: |
1038 | numchan = 1; |
1038 | numchan = 1; |
1039 | break; |
1039 | break; |
1040 | case 1: |
1040 | case 1: |
1041 | numchan = 2; |
1041 | numchan = 2; |
1042 | break; |
1042 | break; |
1043 | case 2: |
1043 | case 2: |
1044 | numchan = 4; |
1044 | numchan = 4; |
1045 | break; |
1045 | break; |
1046 | case 3: |
1046 | case 3: |
1047 | numchan = 8; |
1047 | numchan = 8; |
1048 | break; |
1048 | break; |
1049 | } |
1049 | } |
1050 | rdev->mc.vram_width = numchan * chansize; |
1050 | rdev->mc.vram_width = numchan * chansize; |
1051 | /* Could aper size report 0 ? */ |
1051 | /* Could aper size report 0 ? */ |
1052 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
1052 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
1053 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
1053 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
1054 | /* Setup GPU memory space */ |
1054 | /* Setup GPU memory space */ |
1055 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1055 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1056 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1056 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1057 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1057 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1058 | r700_vram_gtt_location(rdev, &rdev->mc); |
1058 | r700_vram_gtt_location(rdev, &rdev->mc); |
1059 | radeon_update_bandwidth_info(rdev); |
1059 | radeon_update_bandwidth_info(rdev); |
1060 | 1060 | ||
1061 | return 0; |
1061 | return 0; |
1062 | } |
1062 | } |
1063 | 1063 | ||
1064 | static int rv770_startup(struct radeon_device *rdev) |
1064 | static int rv770_startup(struct radeon_device *rdev) |
1065 | { |
1065 | { |
1066 | int r; |
1066 | int r; |
1067 | 1067 | ||
1068 | /* enable pcie gen2 link */ |
1068 | /* enable pcie gen2 link */ |
1069 | rv770_pcie_gen2_enable(rdev); |
1069 | rv770_pcie_gen2_enable(rdev); |
1070 | 1070 | ||
1071 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
1071 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
1072 | r = r600_init_microcode(rdev); |
1072 | r = r600_init_microcode(rdev); |
1073 | if (r) { |
1073 | if (r) { |
1074 | DRM_ERROR("Failed to load firmware!\n"); |
1074 | DRM_ERROR("Failed to load firmware!\n"); |
1075 | return r; |
1075 | return r; |
1076 | } |
1076 | } |
1077 | } |
1077 | } |
1078 | 1078 | ||
1079 | rv770_mc_program(rdev); |
1079 | rv770_mc_program(rdev); |
1080 | if (rdev->flags & RADEON_IS_AGP) { |
1080 | if (rdev->flags & RADEON_IS_AGP) { |
1081 | rv770_agp_enable(rdev); |
1081 | rv770_agp_enable(rdev); |
1082 | } else { |
1082 | } else { |
1083 | r = rv770_pcie_gart_enable(rdev); |
1083 | r = rv770_pcie_gart_enable(rdev); |
1084 | if (r) |
1084 | if (r) |
1085 | return r; |
1085 | return r; |
1086 | } |
1086 | } |
1087 | r = rv770_vram_scratch_init(rdev); |
1087 | r = rv770_vram_scratch_init(rdev); |
1088 | if (r) |
1088 | if (r) |
1089 | return r; |
1089 | return r; |
1090 | rv770_gpu_init(rdev); |
1090 | rv770_gpu_init(rdev); |
- | 1091 | r = r600_blit_init(rdev); |
|
- | 1092 | if (r) { |
|
- | 1093 | // r600_blit_fini(rdev); |
|
- | 1094 | rdev->asic->copy = NULL; |
|
- | 1095 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
|
- | 1096 | } |
|
- | 1097 | ||
1091 | /* allocate wb buffer */ |
1098 | /* allocate wb buffer */ |
1092 | r = radeon_wb_init(rdev); |
1099 | r = radeon_wb_init(rdev); |
1093 | if (r) |
1100 | if (r) |
1094 | return r; |
1101 | return r; |
1095 | 1102 | ||
1096 | /* Enable IRQ */ |
1103 | /* Enable IRQ */ |
1097 | r = r600_irq_init(rdev); |
1104 | r = r600_irq_init(rdev); |
1098 | if (r) { |
1105 | if (r) { |
1099 | DRM_ERROR("radeon: IH init failed (%d).\n", r); |
1106 | DRM_ERROR("radeon: IH init failed (%d).\n", r); |
1100 | // radeon_irq_kms_fini(rdev); |
1107 | // radeon_irq_kms_fini(rdev); |
1101 | return r; |
1108 | return r; |
1102 | } |
1109 | } |
1103 | r600_irq_set(rdev); |
1110 | r600_irq_set(rdev); |
1104 | 1111 | ||
1105 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
1112 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
1106 | if (r) |
1113 | if (r) |
1107 | return r; |
1114 | return r; |
1108 | r = rv770_cp_load_microcode(rdev); |
1115 | r = rv770_cp_load_microcode(rdev); |
1109 | if (r) |
1116 | if (r) |
1110 | return r; |
1117 | return r; |
1111 | r = r600_cp_resume(rdev); |
1118 | r = r600_cp_resume(rdev); |
1112 | if (r) |
1119 | if (r) |
1113 | return r; |
1120 | return r; |
1114 | 1121 | ||
1115 | return 0; |
1122 | return 0; |
1116 | } |
1123 | } |
1117 | 1124 | ||
1118 | 1125 | ||
1119 | 1126 | ||
1120 | 1127 | ||
1121 | 1128 | ||
1122 | 1129 | ||
1123 | 1130 | ||
1124 | /* Plan is to move initialization in that function and use |
1131 | /* Plan is to move initialization in that function and use |
1125 | * helper function so that radeon_device_init pretty much |
1132 | * helper function so that radeon_device_init pretty much |
1126 | * do nothing more than calling asic specific function. This |
1133 | * do nothing more than calling asic specific function. This |
1127 | * should also allow to remove a bunch of callback function |
1134 | * should also allow to remove a bunch of callback function |
1128 | * like vram_info. |
1135 | * like vram_info. |
1129 | */ |
1136 | */ |
1130 | int rv770_init(struct radeon_device *rdev) |
1137 | int rv770_init(struct radeon_device *rdev) |
1131 | { |
1138 | { |
1132 | int r; |
1139 | int r; |
1133 | 1140 | ||
1134 | /* This don't do much */ |
1141 | /* This don't do much */ |
1135 | r = radeon_gem_init(rdev); |
1142 | r = radeon_gem_init(rdev); |
1136 | if (r) |
1143 | if (r) |
1137 | return r; |
1144 | return r; |
1138 | /* Read BIOS */ |
1145 | /* Read BIOS */ |
1139 | if (!radeon_get_bios(rdev)) { |
1146 | if (!radeon_get_bios(rdev)) { |
1140 | if (ASIC_IS_AVIVO(rdev)) |
1147 | if (ASIC_IS_AVIVO(rdev)) |
1141 | return -EINVAL; |
1148 | return -EINVAL; |
1142 | } |
1149 | } |
1143 | /* Must be an ATOMBIOS */ |
1150 | /* Must be an ATOMBIOS */ |
1144 | if (!rdev->is_atom_bios) { |
1151 | if (!rdev->is_atom_bios) { |
1145 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); |
1152 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); |
1146 | return -EINVAL; |
1153 | return -EINVAL; |
1147 | } |
1154 | } |
1148 | r = radeon_atombios_init(rdev); |
1155 | r = radeon_atombios_init(rdev); |
1149 | if (r) |
1156 | if (r) |
1150 | return r; |
1157 | return r; |
1151 | /* Post card if necessary */ |
1158 | /* Post card if necessary */ |
1152 | if (!radeon_card_posted(rdev)) { |
1159 | if (!radeon_card_posted(rdev)) { |
1153 | if (!rdev->bios) { |
1160 | if (!rdev->bios) { |
1154 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
1161 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
1155 | return -EINVAL; |
1162 | return -EINVAL; |
1156 | } |
1163 | } |
1157 | DRM_INFO("GPU not posted. posting now...\n"); |
1164 | DRM_INFO("GPU not posted. posting now...\n"); |
1158 | atom_asic_init(rdev->mode_info.atom_context); |
1165 | atom_asic_init(rdev->mode_info.atom_context); |
1159 | } |
1166 | } |
1160 | /* Initialize scratch registers */ |
1167 | /* Initialize scratch registers */ |
1161 | r600_scratch_init(rdev); |
1168 | r600_scratch_init(rdev); |
1162 | /* Initialize surface registers */ |
1169 | /* Initialize surface registers */ |
1163 | radeon_surface_init(rdev); |
1170 | radeon_surface_init(rdev); |
1164 | /* Initialize clocks */ |
1171 | /* Initialize clocks */ |
1165 | radeon_get_clock_info(rdev->ddev); |
1172 | radeon_get_clock_info(rdev->ddev); |
1166 | /* Fence driver */ |
1173 | /* Fence driver */ |
1167 | r = radeon_fence_driver_init(rdev); |
1174 | r = radeon_fence_driver_init(rdev); |
1168 | if (r) |
1175 | if (r) |
1169 | return r; |
1176 | return r; |
1170 | /* initialize AGP */ |
1177 | /* initialize AGP */ |
1171 | if (rdev->flags & RADEON_IS_AGP) { |
1178 | if (rdev->flags & RADEON_IS_AGP) { |
1172 | r = radeon_agp_init(rdev); |
1179 | r = radeon_agp_init(rdev); |
1173 | if (r) |
1180 | if (r) |
1174 | radeon_agp_disable(rdev); |
1181 | radeon_agp_disable(rdev); |
1175 | } |
1182 | } |
1176 | r = rv770_mc_init(rdev); |
1183 | r = rv770_mc_init(rdev); |
1177 | if (r) |
1184 | if (r) |
1178 | return r; |
1185 | return r; |
1179 | /* Memory manager */ |
1186 | /* Memory manager */ |
1180 | r = radeon_bo_init(rdev); |
1187 | r = radeon_bo_init(rdev); |
1181 | if (r) |
1188 | if (r) |
1182 | return r; |
1189 | return r; |
1183 | 1190 | ||
1184 | r = radeon_irq_kms_init(rdev); |
1191 | r = radeon_irq_kms_init(rdev); |
1185 | if (r) |
1192 | if (r) |
1186 | return r; |
1193 | return r; |
1187 | 1194 | ||
1188 | rdev->cp.ring_obj = NULL; |
1195 | rdev->cp.ring_obj = NULL; |
1189 | r600_ring_init(rdev, 1024 * 1024); |
1196 | r600_ring_init(rdev, 1024 * 1024); |
1190 | 1197 | ||
1191 | rdev->ih.ring_obj = NULL; |
1198 | rdev->ih.ring_obj = NULL; |
1192 | r600_ih_ring_init(rdev, 64 * 1024); |
1199 | r600_ih_ring_init(rdev, 64 * 1024); |
1193 | 1200 | ||
1194 | r = r600_pcie_gart_init(rdev); |
1201 | r = r600_pcie_gart_init(rdev); |
1195 | if (r) |
1202 | if (r) |
1196 | return r; |
1203 | return r; |
1197 | 1204 | ||
1198 | rdev->accel_working = true; |
1205 | rdev->accel_working = true; |
1199 | r = rv770_startup(rdev); |
1206 | r = rv770_startup(rdev); |
1200 | if (r) { |
1207 | if (r) { |
1201 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1208 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1202 | rv770_pcie_gart_fini(rdev); |
1209 | rv770_pcie_gart_fini(rdev); |
1203 | rdev->accel_working = false; |
1210 | rdev->accel_working = false; |
1204 | } |
1211 | } |
1205 | if (rdev->accel_working) { |
1212 | if (rdev->accel_working) { |
1206 | r = radeon_ib_pool_init(rdev); |
1213 | r = radeon_ib_pool_init(rdev); |
1207 | if (r) { |
1214 | if (r) { |
1208 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
1215 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
1209 | rdev->accel_working = false; |
1216 | rdev->accel_working = false; |
1210 | } else { |
1217 | } else { |
1211 | r = r600_ib_test(rdev); |
1218 | r = r600_ib_test(rdev); |
1212 | if (r) { |
1219 | if (r) { |
1213 | dev_err(rdev->dev, "IB test failed (%d).\n", r); |
1220 | dev_err(rdev->dev, "IB test failed (%d).\n", r); |
1214 | rdev->accel_working = false; |
1221 | rdev->accel_working = false; |
1215 | } |
1222 | } |
1216 | } |
1223 | } |
1217 | } |
1224 | } |
1218 | 1225 | ||
1219 | return 0; |
1226 | return 0; |
1220 | } |
1227 | } |
1221 | 1228 | ||
1222 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) |
1229 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) |
1223 | { |
1230 | { |
1224 | u32 link_width_cntl, lanes, speed_cntl, tmp; |
1231 | u32 link_width_cntl, lanes, speed_cntl, tmp; |
1225 | u16 link_cntl2; |
1232 | u16 link_cntl2; |
1226 | 1233 | ||
1227 | if (radeon_pcie_gen2 == 0) |
1234 | if (radeon_pcie_gen2 == 0) |
1228 | return; |
1235 | return; |
1229 | 1236 | ||
1230 | if (rdev->flags & RADEON_IS_IGP) |
1237 | if (rdev->flags & RADEON_IS_IGP) |
1231 | return; |
1238 | return; |
1232 | 1239 | ||
1233 | if (!(rdev->flags & RADEON_IS_PCIE)) |
1240 | if (!(rdev->flags & RADEON_IS_PCIE)) |
1234 | return; |
1241 | return; |
1235 | 1242 | ||
1236 | /* x2 cards have a special sequence */ |
1243 | /* x2 cards have a special sequence */ |
1237 | if (ASIC_IS_X2(rdev)) |
1244 | if (ASIC_IS_X2(rdev)) |
1238 | return; |
1245 | return; |
1239 | 1246 | ||
1240 | /* advertise upconfig capability */ |
1247 | /* advertise upconfig capability */ |
1241 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); |
1248 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); |
1242 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; |
1249 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; |
1243 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1250 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1244 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); |
1251 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); |
1245 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { |
1252 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { |
1246 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; |
1253 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; |
1247 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | |
1254 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | |
1248 | LC_RECONFIG_ARC_MISSING_ESCAPE); |
1255 | LC_RECONFIG_ARC_MISSING_ESCAPE); |
1249 | link_width_cntl |= lanes | LC_RECONFIG_NOW | |
1256 | link_width_cntl |= lanes | LC_RECONFIG_NOW | |
1250 | LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT; |
1257 | LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT; |
1251 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1258 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1252 | } else { |
1259 | } else { |
1253 | link_width_cntl |= LC_UPCONFIGURE_DIS; |
1260 | link_width_cntl |= LC_UPCONFIGURE_DIS; |
1254 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1261 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1255 | } |
1262 | } |
1256 | 1263 | ||
1257 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1264 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1258 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && |
1265 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && |
1259 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { |
1266 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { |
1260 | 1267 | ||
1261 | tmp = RREG32(0x541c); |
1268 | tmp = RREG32(0x541c); |
1262 | WREG32(0x541c, tmp | 0x8); |
1269 | WREG32(0x541c, tmp | 0x8); |
1263 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); |
1270 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); |
1264 | link_cntl2 = RREG16(0x4088); |
1271 | link_cntl2 = RREG16(0x4088); |
1265 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; |
1272 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; |
1266 | link_cntl2 |= 0x2; |
1273 | link_cntl2 |= 0x2; |
1267 | WREG16(0x4088, link_cntl2); |
1274 | WREG16(0x4088, link_cntl2); |
1268 | WREG32(MM_CFGREGS_CNTL, 0); |
1275 | WREG32(MM_CFGREGS_CNTL, 0); |
1269 | 1276 | ||
1270 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1277 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1271 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; |
1278 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; |
1272 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1279 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1273 | 1280 | ||
1274 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1281 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1275 | speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; |
1282 | speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; |
1276 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1283 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1277 | 1284 | ||
1278 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1285 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1279 | speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; |
1286 | speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; |
1280 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1287 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1281 | 1288 | ||
1282 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1289 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); |
1283 | speed_cntl |= LC_GEN2_EN_STRAP; |
1290 | speed_cntl |= LC_GEN2_EN_STRAP; |
1284 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1291 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); |
1285 | 1292 | ||
1286 | } else { |
1293 | } else { |
1287 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); |
1294 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); |
1288 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ |
1295 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ |
1289 | if (1) |
1296 | if (1) |
1290 | link_width_cntl |= LC_UPCONFIGURE_DIS; |
1297 | link_width_cntl |= LC_UPCONFIGURE_DIS; |
1291 | else |
1298 | else |
1292 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; |
1299 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; |
1293 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1300 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
1294 | } |
1301 | } |
1295 | }><>><>><>><>>><>><>>><>>>>>>><>><>>>>>> |
1302 | }><>><>><>><>>><>><>>><>>>>>>><>><>>>>>> |