/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Chris Wilson
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

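/* Locate the physical base of the stolen region as reported by the hardware
 * (PCI config offset 0x5c on gen3+), and on older parts shrink the usable
 * range if the GTT itself turns out to live inside stolen memory. Returns 0
 * when no usable base can be determined.
 */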
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR. On gen2, the layout is again slightly
         * different with the Graphics Segment immediately following Top of
         * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
         * reported by 865g, so we just use the top of memory as determined
         * by the e820 probe.
         *
         * XXX However gen2 requires an unavailable symbol.
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
                struct {
                        u32 start, end;
                } stolen[2] = {
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                };
                u64 gtt_start, gtt_end;

                gtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev))
                        gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
                                (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        gtt_start &= PGTBL_ADDRESS_LO_MASK;
                gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

                if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
                        stolen[0].end = gtt_start;
                if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
                        stolen[1].start = gtt_end;

                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
                }

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long) gtt_start,
                                      (unsigned long long) gtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
                                      base, base + (u32) dev_priv->gtt.stolen_size - 1);
                }
        }

#if 0

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                          base, base + (uint32_t)dev_priv->gtt.stolen_size);
                base = 0;
        }
#endif
        return base;
}

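/* Reserve space for the compressed framebuffer (CFB) in stolen memory. We
 * first try to over-allocate at twice the requested size; if that fails, the
 * request is repeatedly halved while the compression threshold doubles, up
 * to the hardware limit of 1:4 (gen4 and earlier cannot use a reduced
 * allocation at all). Returns the threshold in use, or 0 on failure.
 */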
static int find_compression_threshold(struct drm_device *dev,
                                      struct drm_mm_node *node,
                                      int size,
                                      int fb_cpp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int compression_threshold = 1;
        int ret;

        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
         *
         * The enable_fbc code will attempt to use one of our 2 compression
         * thresholds, therefore, in that case, we only have 1 resort.
         */

        /* Try to over-allocate to reduce reallocations and fragmentation. */
        ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
                                 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret == 0)
                return compression_threshold;

again:
        /* HW's ability to limit the CFB is 1:4 */
        if (compression_threshold > 4 ||
            (fb_cpp == 2 && compression_threshold == 2))
                return 0;

        ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
                                 size >>= 1, 4096,
                                 DRM_MM_SEARCH_DEFAULT);
        if (ret && INTEL_INFO(dev)->gen <= 4) {
                return 0;
        } else if (ret) {
                compression_threshold <<= 1;
                goto again;
        } else {
                return compression_threshold;
        }
}

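/* Carve the CFB, and on pre-GM45 parts also the compressed line-length
 * buffer, out of stolen memory and program the hardware base registers.
 * Returns 0 on success or -ENOSPC if stolen memory cannot fit the request.
 */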
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int ret;

        ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
                                         size, fb_cpp);
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
                DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
        }

        dev_priv->fbc.threshold = ret;

        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
                                         4096, 4096, DRM_MM_SEARCH_DEFAULT);
                if (ret)
                        goto err_fb;

                dev_priv->fbc.compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->fbc.size = size / dev_priv->fbc.threshold;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        kfree(compressed_llb);
        drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
        return -ENOSPC;
}

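/* Ensure a compressed framebuffer of at least @size bytes is reserved in
 * stolen memory. An existing reservation larger than the request is kept;
 * otherwise it is released and a new one is set up.
 */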
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        if (size < dev_priv->fbc.size)
                return 0;

        /* Release any current block */
        i915_gem_stolen_cleanup_compression(dev);

        return i915_setup_compression(dev, size, fb_cpp);
}

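/* Return the CFB, and the compressed line-length buffer if one was
 * allocated, to the stolen allocator.
 */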
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->fbc.size == 0)
                return;

        drm_mm_remove_node(&dev_priv->fbc.compressed_fb);

        if (dev_priv->fbc.compressed_llb) {
                drm_mm_remove_node(dev_priv->fbc.compressed_llb);
                kfree(dev_priv->fbc.compressed_llb);
        }

        dev_priv->fbc.size = 0;
}

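/* Tear down the stolen range allocator, releasing the FBC reservations
 * first.
 */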
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
}

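/* Probe the stolen region and initialise a simple range allocator over it,
 * excluding whatever the BIOS keeps reserved at the top. Stolen memory is
 * not used at all while DMAR/IOMMU remapping is active on pre-gen8 parts.
 */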
int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;
        int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }
#endif

        if (dev_priv->gtt.stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        if (INTEL_INFO(dev)->gen >= 8) {
                tmp = I915_READ(GEN7_BIOS_RESERVED);
                tmp >>= GEN8_BIOS_RESERVED_SHIFT;
                tmp &= GEN8_BIOS_RESERVED_MASK;
                bios_reserved = (1024*1024) << tmp;
        } else if (IS_GEN7(dev)) {
                tmp = I915_READ(GEN7_BIOS_RESERVED);
                bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
                        256*1024 : 1024*1024;
        }

        if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
                return 0;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);

        return 0;
}

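/* Build a single-entry sg_table covering the contiguous physical range of a
 * stolen allocation; there are no struct pages backing stolen memory, so
 * the dma address is filled in directly.
 */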
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

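/* Stolen objects receive their fake sg_table at creation time and keep their
 * pages pinned for life, so the regular get_pages path must never be hit.
 */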
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}


static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        if (obj->stolen) {
                drm_mm_remove_node(obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

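/* Wrap an already-reserved stolen-memory node in a GEM object, attaching the
 * fake DMA mapping and pinning its pages for the lifetime of the object.
 */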
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

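/* Allocate @size bytes from stolen memory and return a GEM object backed by
 * the allocation, or NULL if no space is available.
 */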
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
                                 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        drm_mm_remove_node(stolen);
        kfree(stolen);
        return NULL;
}

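/* Create a GEM object for a stolen range that firmware has already put to
 * use (typically the pre-initialised framebuffer), optionally reserving the
 * matching range in the global GTT at @gtt_offset.
 */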
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        BUG_ON(stolen_offset & 4095);
        BUG_ON(size & 4095);

        if (WARN_ON(size == 0))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                drm_mm_remove_node(stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_out;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        goto err_vma;
                }
        }

        vma->bound |= GLOBAL_BIND;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);
        i915_gem_object_pin_pages(obj);

        return obj;

err_vma:
        i915_gem_vma_destroy(vma);
err_out:
        drm_mm_remove_node(stolen);
        kfree(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
}