/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Chris Wilson
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

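/*
 * Reserve a range inside the stolen memory allocator. The caller supplies an
 * unused drm_mm_node which, on success, describes the allocated range. On
 * gen8 the search is nudged past the first page of stolen memory
 * (WaSkipStolenMemoryFirstPage).
 */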
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        /* See the comment at the drm_mm_init() call for more about this check.
         * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
        if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
                start = 4096;

        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
                                          alignment, start, end,
                                          DRM_MM_SEARCH_DEFAULT);
        mutex_unlock(&dev_priv->mm.stolen_lock);

        return ret;
}

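/* Reserve a range anywhere inside [0, stolen_usable_size). */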
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
                                                    alignment, 0,
                                                    dev_priv->gtt.stolen_usable_size);
}

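/* Return a previously reserved range to the stolen memory allocator. */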
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node)
{
        mutex_lock(&dev_priv->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&dev_priv->mm.stolen_lock);
}

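/*
 * Work out the physical base address of stolen memory for this device.
 * Returns 0 if the base cannot be determined, in which case stolen memory
 * is left unused.
 */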
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR.
         *
         * On 865 we just check the TOUD register.
         *
         * On 830/845/85x the stolen memory base isn't available in any
         * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
         *
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
                struct {
                        u32 start, end;
                } stolen[2] = {
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                };
                u64 gtt_start, gtt_end;

                gtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev))
                        gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
                                    (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        gtt_start &= PGTBL_ADDRESS_LO_MASK;
                gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

                if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
                        stolen[0].end = gtt_start;
                if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
                        stolen[1].start = gtt_end;

                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
                }

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long) gtt_start,
                                      (unsigned long long) gtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
                                      base, base + (u32) dev_priv->gtt.stolen_size - 1);
                }
        }

#if 0

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * base + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 */
                r = devm_request_mem_region(dev->dev, base + 1,
                                            dev_priv->gtt.stolen_size - 1,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev)) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                                  base, base + (uint32_t)dev_priv->gtt.stolen_size);
                        base = 0;
                }
        }
#endif
        return base;
}

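/* Tear down the allocator set up by i915_gem_init_stolen(). */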
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        drm_mm_takedown(&dev_priv->mm.stolen);
}

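/*
 * The *_get_stolen_reserved() helpers below read back the portion of stolen
 * memory that the BIOS/hardware keeps reserved for its own use.
 * i915_gem_init_stolen() excludes everything from the reserved base up to
 * the top of stolen memory from the driver's allocator.
 */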
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
                                     CTG_STOLEN_RESERVED :
                                     ELK_STOLEN_RESERVED);
        unsigned long stolen_top = dev_priv->mm.stolen_base +
                                   dev_priv->gtt.stolen_size;

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

        WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        /* On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and there's
         * nothing reserved. */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

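/* SNB: GEN6_STOLEN_RESERVED encodes the reserved base plus a discrete size field. */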
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

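/* gen7: same register, different address mask and only 1M/256K size encodings. */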
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

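/* gen8+: the size field grows to cover 1M/2M/4M/8M. */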
static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

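/*
 * BDW/SKL variant: there is no usable size field, so the reserved range is
 * taken to run from the encoded base up to the top of stolen memory.
 */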
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
        unsigned long stolen_top;

        stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        /* On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and there's
         * nothing reserved. */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

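/*
 * Set up the stolen memory allocator: locate the physical base of stolen
 * memory, subtract whatever the BIOS/hardware has reserved at its top, and
 * initialise a drm_mm range manager over the remaining usable space. The
 * i915.fbsize parameter is clamped so the framebuffer fits in that space.
 */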
int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long reserved_total, reserved_base = 0, reserved_size;
        unsigned long stolen_top;

        mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }
#endif

        if (dev_priv->gtt.stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
        case 3:
                break;
        case 4:
                if (IS_G4X(dev))
                        g4x_get_stolen_reserved(dev_priv, &reserved_base,
                                                &reserved_size);
                break;
        case 5:
                /* Assume the gen6 maximum for the older platforms. */
                reserved_size = 1024 * 1024;
                reserved_base = stolen_top - reserved_size;
                break;
        case 6:
                gen6_get_stolen_reserved(dev_priv, &reserved_base,
                                         &reserved_size);
                break;
        case 7:
                gen7_get_stolen_reserved(dev_priv, &reserved_base,
                                         &reserved_size);
                break;
        default:
                if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
                        bdw_get_stolen_reserved(dev_priv, &reserved_base,
                                                &reserved_size);
                else
                        gen8_get_stolen_reserved(dev_priv, &reserved_base,
                                                 &reserved_size);
                break;
        }

        /* It is possible for the reserved base to be zero, but the register
         * field for size doesn't have a zero option. */
        if (reserved_base == 0) {
                reserved_size = 0;
                reserved_base = stolen_top;
        }

        if (reserved_base < dev_priv->mm.stolen_base ||
            reserved_base + reserved_size > stolen_top) {
                DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
                              reserved_base, reserved_base + reserved_size,
                              dev_priv->mm.stolen_base, stolen_top);
                return 0;
        }

        /* It is possible for the reserved area to end before the end of stolen
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;

        DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
                      dev_priv->gtt.stolen_size >> 10,
                      (dev_priv->gtt.stolen_size - reserved_total) >> 10);

        dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
                                           reserved_total;

        /*
         * Basic memrange allocator for stolen space.
         *
         * TODO: Notice that some platforms require us to not use the first page
         * of the stolen memory but their BIOSes may still put the framebuffer
         * on the first page. So we don't reserve this page for now because of
         * that. Our current solution is to just prevent new nodes from being
         * inserted on the first page - see the check we have at
         * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
         * problem later.
         */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);

        {
                u32 usable_size = dev_priv->gtt.stolen_usable_size >> 20;

                if (i915.fbsize > usable_size) {
                        i915.fbsize = usable_size;
                        DRM_DEBUG_KMS("Adjust framebuffer size to match reserved memory\n"
                                      "new fbsize %dMB\n", i915.fbsize);
                }
        }

        return 0;
}

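/*
 * Build a single-entry sg_table covering a range of stolen memory. Stolen
 * memory has no struct page backing, so the entry carries only the DMA
 * address and length of the contiguous physical range.
 */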
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

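/*
 * The backing pages of a stolen object are created up front in
 * _i915_gem_object_create_stolen(), so get_pages must never be reached;
 * put_pages only runs when the object is freed.
 */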
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}

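/* On final release, hand the object's range back to the stolen allocator. */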
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

        if (obj->stolen) {
                i915_gem_stolen_remove_node(dev_priv, obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

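/*
 * Wrap an already-reserved stolen range in a GEM object: allocate the object,
 * attach the fake sg_table for the range and pin its pages for the object's
 * lifetime.
 */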
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

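/*
 * Allocate a fresh GEM object backed by stolen memory. Returns NULL if the
 * stolen allocator is unavailable or cannot fit the request.
 */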
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        return NULL;
}

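/*
 * Create a GEM object for a stolen range that is already in use, typically
 * the framebuffer the BIOS set up. The exact stolen range is reserved and,
 * unless gtt_offset is I915_GTT_OFFSET_NONE, the matching GGTT range is
 * reserved as well so the existing mapping is preserved.
 */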
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
            WARN_ON(stolen_offset & 4095))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        goto err;
                }

                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
                list_add_tail(&vma->mm_list, &ggtt->inactive_list);
        }

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        i915_gem_object_pin_pages(obj);

        return obj;

err:
        drm_gem_object_unreference(&obj->base);
        return NULL;
}