/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * differ from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, the
 * globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate
 * on, or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a
 * passed-in struct i915_ggtt_view does not need to be persistent (left
 * around after calling the core API functions).
 *
 */
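
/*
 * Purely illustrative sketch (not part of the driver): following the three
 * steps above, a hypothetical I915_GGTT_VIEW_EXAMPLE view would add that
 * value to enum i915_ggtt_view_type, carry its metadata in the struct
 * i915_ggtt_view union, and gain a branch in i915_get_ggtt_vma_pages()
 * below that builds the view's scatter-gather table from the object's
 * backing pages.
 */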

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal = {
        .type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
        .type = I915_GGTT_VIEW_ROTATED,
};
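
/*
 * Resolve the i915.enable_ppgtt module parameter against what the hardware
 * and the current mode of operation actually support. The return value
 * encodes the chosen PPGTT level: 0 = disabled, 1 = aliasing, 2 = full,
 * 3 = full 48-bit.
 */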
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
        bool has_aliasing_ppgtt;
        bool has_full_ppgtt;
        bool has_full_48bit_ppgtt;

        has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
        has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
        has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;

        if (intel_vgpu_active(dev))
                has_full_ppgtt = false; /* emulation is too hard */

        /*
         * We don't allow disabling PPGTT for gen9+ as it's a requirement for
         * execlists, the sole mechanism available to submit work.
         */
        if (INTEL_INFO(dev)->gen < 9 &&
            (enable_ppgtt == 0 || !has_aliasing_ppgtt))
                return 0;

        if (enable_ppgtt == 1)
                return 1;

        if (enable_ppgtt == 2 && has_full_ppgtt)
                return 2;

        if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
                return 3;

#ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
                return 0;
        }
#endif

        /* Early VLV doesn't have this */
        if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
                DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
                return 0;
        }

        if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
                return has_full_48bit_ppgtt ? 3 : 2;
        else
                return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 unused)
{
        u32 pte_flags = 0;

        /* Currently applicable only to VLV */
        if (vma->obj->gt_ro)
                pte_flags |= PTE_READ_ONLY;

        vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
                                cache_level, pte_flags);

        return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
        vma->vm->clear_range(vma->vm,
                             vma->node.start,
                             vma->obj->base.size,
                             true);
}
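
/*
 * Gen8+ PTEs carry a PPAT (page attribute table) index instead of raw
 * cacheability bits; the switch below selects the PPAT entry matching the
 * requested cache level.
 */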
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
                                  enum i915_cache_level level,
                                  bool valid)
{
        gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
        pte |= addr;

        switch (level) {
        case I915_CACHE_NONE:
                pte |= PPAT_UNCACHED_INDEX;
                break;
        case I915_CACHE_WT:
                pte |= PPAT_DISPLAY_ELLC_INDEX;
                break;
        default:
                pte |= PPAT_CACHED_INDEX;
                break;
        }

        return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
                                  const enum i915_cache_level level)
{
        gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
        pde |= addr;
        if (level != I915_CACHE_NONE)
                pde |= PPAT_CACHED_PDE_INDEX;
        else
                pde |= PPAT_UNCACHED_INDEX;
        return pde;
}
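
/*
 * The higher-level gen8 descriptors (PDPEs and PML4Es) share the PDE bit
 * layout, so the same encoder is reused for all three levels.
 */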
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid, u32 unused)
{
        gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_L3_LLC:
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
        }

        return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid, u32 unused)
{
        gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_L3_LLC:
                pte |= GEN7_PTE_CACHE_L3_LLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
        }

        return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid, u32 flags)
{
        gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        if (!(flags & PTE_READ_ONLY))
                pte |= BYT_PTE_WRITEABLE;

        if (level != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

        return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid, u32 unused)
{
        gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);

        if (level != I915_CACHE_NONE)
                pte |= HSW_WB_LLC_AGE3;

        return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
                                  enum i915_cache_level level,
                                  bool valid, u32 unused)
{
        gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_NONE:
                break;
        case I915_CACHE_WT:
                pte |= HSW_WT_ELLC_LLC_AGE3;
                break;
        default:
                pte |= HSW_WB_ELLC_LLC_AGE3;
                break;
        }

        return pte;
}

static int __setup_page_dma(struct drm_device *dev,
                            struct i915_page_dma *p, gfp_t flags)
{
        struct device *device = &dev->pdev->dev;

        p->page = alloc_page(flags);
        if (!p->page)
                return -ENOMEM;

        p->daddr = page_to_phys(p->page);

        return 0;
}

static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
        return __setup_page_dma(dev, p, GFP_KERNEL);
}

static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
        if (WARN_ON(!p->page))
                return;

        __free_page(p->page);
        memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
        return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
        /* There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play it safe for now.
         */
        if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
                drm_clflush_virt_range(vaddr, PAGE_SIZE);

        kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))

#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
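
/*
 * Fill one page-sized paging structure with identical 64-bit entries;
 * the count of 512 corresponds to PAGE_SIZE / sizeof(uint64_t).
 */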
static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
                          const uint64_t val)
{
        int i;
        uint64_t * const vaddr = kmap_page_dma(p);

        for (i = 0; i < 512; i++)
                vaddr[i] = val;

        kunmap_page_dma(dev, vaddr);
}

static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
                             const uint32_t val32)
{
        uint64_t v = val32;

        v = v << 32 | val32;

        fill_page_dma(dev, p, v);
}
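
/*
 * The scratch page backs every PTE that is not pointing at real backing
 * store, so that stray GPU reads and writes land somewhere harmless.
 */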
static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
{
        struct i915_page_scratch *sp;
        int ret;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (sp == NULL)
                return ERR_PTR(-ENOMEM);

        ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
        if (ret) {
                kfree(sp);
                return ERR_PTR(ret);
        }

        // set_pages_uc(px_page(sp), 1);

        return sp;
}

static void free_scratch_page(struct drm_device *dev,
                              struct i915_page_scratch *sp)
{
        // set_pages_wb(px_page(sp), 1);

        cleanup_px(dev, sp);
        kfree(sp);
}
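
/*
 * Each page table tracks which of its entries are live in a bitmap
 * (used_ptes); a gen8 table holds GEN8_PTES entries, a gen6/7 table
 * GEN6_PTES.
 */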
static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
        struct i915_page_table *pt;
        const size_t count = INTEL_INFO(dev)->gen >= 8 ?
                GEN8_PTES : GEN6_PTES;
        int ret = -ENOMEM;

        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
        if (!pt)
                return ERR_PTR(-ENOMEM);

        pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
                                GFP_KERNEL);

        if (!pt->used_ptes)
                goto fail_bitmap;

        ret = setup_px(dev, pt);
        if (ret)
                goto fail_page_m;

        return pt;

fail_page_m:
        kfree(pt->used_ptes);
fail_bitmap:
        kfree(pt);

        return ERR_PTR(ret);
}

static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
        cleanup_px(dev, pt);
        kfree(pt->used_ptes);
        kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
{
        gen8_pte_t scratch_pte;

        scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                      I915_CACHE_LLC, true);

        fill_px(vm->dev, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
{
        gen6_pte_t scratch_pte;

        WARN_ON(px_dma(vm->scratch_page) == 0);

        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
                                     I915_CACHE_LLC, true, 0);

        fill32_px(vm->dev, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
        struct i915_page_directory *pd;
        int ret = -ENOMEM;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
                                sizeof(*pd->used_pdes), GFP_KERNEL);
        if (!pd->used_pdes)
                goto fail_bitmap;

        ret = setup_px(dev, pd);
        if (ret)
                goto fail_page_m;

        return pd;

fail_page_m:
        kfree(pd->used_pdes);
fail_bitmap:
        kfree(pd);

        return ERR_PTR(ret);
}

static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
{
        if (px_page(pd)) {
                cleanup_px(dev, pd);
                kfree(pd->used_pdes);
                kfree(pd);
        }
}

static void gen8_initialize_pd(struct i915_address_space *vm,
                               struct i915_page_directory *pd)
{
        gen8_pde_t scratch_pde;

        scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

        fill_px(vm->dev, pd, scratch_pde);
}
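
/*
 * A page directory pointer holds I915_PDPES_PER_PDP(dev) directories:
 * 4 in the legacy 32-bit layout and (per the hardware layout) 512 under
 * full 48-bit PPGTT.
 */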
static int __pdp_init(struct drm_device *dev,
                      struct i915_page_directory_pointer *pdp)
{
        size_t pdpes = I915_PDPES_PER_PDP(dev);

        pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
                                  sizeof(unsigned long),
                                  GFP_KERNEL);
        if (!pdp->used_pdpes)
                return -ENOMEM;

        pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
                                      GFP_KERNEL);
        if (!pdp->page_directory) {
                kfree(pdp->used_pdpes);
                /* the PDP might be the statically allocated top level. Keep it
                 * as clean as possible */
                pdp->used_pdpes = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
        kfree(pdp->used_pdpes);
        kfree(pdp->page_directory);
        pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
{
        struct i915_page_directory_pointer *pdp;
        int ret = -ENOMEM;

        WARN_ON(!USES_FULL_48BIT_PPGTT(dev));

        pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
        if (!pdp)
                return ERR_PTR(-ENOMEM);

        ret = __pdp_init(dev, pdp);
        if (ret)
                goto fail_bitmap;

        ret = setup_px(dev, pdp);
        if (ret)
                goto fail_page_m;

        return pdp;

fail_page_m:
        __pdp_fini(pdp);
fail_bitmap:
        kfree(pdp);

        return ERR_PTR(ret);
}

static void free_pdp(struct drm_device *dev,
                     struct i915_page_directory_pointer *pdp)
{
        __pdp_fini(pdp);
        if (USES_FULL_48BIT_PPGTT(dev)) {
                cleanup_px(dev, pdp);
                kfree(pdp);
        }
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
                                struct i915_page_directory_pointer *pdp)
{
        gen8_ppgtt_pdpe_t scratch_pdpe;

        scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

        fill_px(vm->dev, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
                                 struct i915_pml4 *pml4)
{
        gen8_ppgtt_pml4e_t scratch_pml4e;

        scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
                                          I915_CACHE_LLC);

        fill_px(vm->dev, pml4, scratch_pml4e);
}

static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
                          struct i915_page_directory_pointer *pdp,
                          struct i915_page_directory *pd,
                          int index)
{
        gen8_ppgtt_pdpe_t *page_directorypo;

        if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
                return;

        page_directorypo = kmap_px(pdp);
        page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
        kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
                                  struct i915_pml4 *pml4,
                                  struct i915_page_directory_pointer *pdp,
                                  int index)
{
        gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

        WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
        pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
        kunmap_px(ppgtt, pagemap);
}

/* Broadwell Page Directory Pointer Descriptors */
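/*
 * Each PDP register is 64 bits wide and is loaded with two
 * MI_LOAD_REGISTER_IMM commands, one for the upper and one for the lower
 * dword of the address.
 */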
static int gen8_write_pdp(struct drm_i915_gem_request *req,
                          unsigned entry,
                          dma_addr_t addr)
{
        struct intel_engine_cs *ring = req->ring;
        int ret;

        BUG_ON(entry >= 4);

        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
        intel_ring_emit(ring, upper_32_bits(addr));
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
        intel_ring_emit(ring, lower_32_bits(addr));
        intel_ring_advance(ring);

        return 0;
}

static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
                                 struct drm_i915_gem_request *req)
{
        int i, ret;

        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

                ret = gen8_write_pdp(req, i, pd_daddr);
                if (ret)
                        return ret;
        }

        return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_request *req)
{
        return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
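
/*
 * Walk the paging hierarchy covering [start, start + length) and point
 * every PTE in the range at the scratch page, bailing out early on any
 * level that was never allocated.
 */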
static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
                                       struct i915_page_directory_pointer *pdp,
                                       uint64_t start,
                                       uint64_t length,
                                       gen8_pte_t scratch_pte)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_pte_t *pt_vaddr;
        unsigned pdpe = gen8_pdpe_index(start);
        unsigned pde = gen8_pde_index(start);
        unsigned pte = gen8_pte_index(start);
        unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;

        if (WARN_ON(!pdp))
                return;

        while (num_entries) {
                struct i915_page_directory *pd;
                struct i915_page_table *pt;

                if (WARN_ON(!pdp->page_directory[pdpe]))
                        break;

                pd = pdp->page_directory[pdpe];

                if (WARN_ON(!pd->page_table[pde]))
                        break;

                pt = pd->page_table[pde];

                if (WARN_ON(!px_page(pt)))
                        break;

                last_pte = pte + num_entries;
                if (last_pte > GEN8_PTES)
                        last_pte = GEN8_PTES;

                pt_vaddr = kmap_px(pt);

                for (i = pte; i < last_pte; i++) {
                        pt_vaddr[i] = scratch_pte;
                        num_entries--;
                }

                kunmap_px(ppgtt, pt);

                pte = 0;
                if (++pde == I915_PDES) {
                        if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
                                break;
                        pde = 0;
                }
        }
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                                   uint64_t start,
                                   uint64_t length,
                                   bool use_scratch)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                                 I915_CACHE_LLC, use_scratch);

        if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
                gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
                                           scratch_pte);
        } else {
                uint64_t pml4e;
                struct i915_page_directory_pointer *pdp;

                gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
                        gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
                                                   scratch_pte);
                }
        }
}
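
/*
 * Write one PTE per page of the scatter-gather list, starting at the given
 * virtual address and advancing through the PTE, PDE and PDPE indices as
 * each level's table fills up.
 */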
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
                              struct i915_page_directory_pointer *pdp,
                              struct sg_page_iter *sg_iter,
                              uint64_t start,
                              enum i915_cache_level cache_level)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_pte_t *pt_vaddr;
        unsigned pdpe = gen8_pdpe_index(start);
        unsigned pde = gen8_pde_index(start);
        unsigned pte = gen8_pte_index(start);

        pt_vaddr = NULL;

        while (__sg_page_iter_next(sg_iter)) {
                if (pt_vaddr == NULL) {
                        struct i915_page_directory *pd = pdp->page_directory[pdpe];
                        struct i915_page_table *pt = pd->page_table[pde];
                        pt_vaddr = kmap_px(pt);
                }

                pt_vaddr[pte] =
                        gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
                                        cache_level, true);
                if (++pte == GEN8_PTES) {
                        kunmap_px(ppgtt, pt_vaddr);
                        pt_vaddr = NULL;
                        if (++pde == I915_PDES) {
                                if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
                                        break;
                                pde = 0;
                        }
                        pte = 0;
                }
        }

        if (pt_vaddr)
                kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
                                      uint64_t start,
                                      enum i915_cache_level cache_level,
                                      u32 unused)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        struct sg_page_iter sg_iter;

        __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

        if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
                gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
                                              cache_level);
        } else {
                struct i915_page_directory_pointer *pdp;
                uint64_t pml4e;
                uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

                gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
                        gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
                                                      start, cache_level);
                }
        }
}

static void gen8_free_page_tables(struct drm_device *dev,
                                  struct i915_page_directory *pd)
{
        int i;

        if (!px_page(pd))
                return;

        for_each_set_bit(i, pd->used_pdes, I915_PDES) {
                if (WARN_ON(!pd->page_table[i]))
                        continue;

                free_pt(dev, pd->page_table[i]);
                pd->page_table[i] = NULL;
        }
}
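
/*
 * Build the scratch hierarchy for an address space: a scratch page, a page
 * table and a page directory of scratch entries, plus a scratch PDP when
 * running with full 48-bit PPGTT.
 */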
static int gen8_init_scratch(struct i915_address_space *vm)
{
        struct drm_device *dev = vm->dev;

        vm->scratch_page = alloc_scratch_page(dev);
        if (IS_ERR(vm->scratch_page))
                return PTR_ERR(vm->scratch_page);

        vm->scratch_pt = alloc_pt(dev);
        if (IS_ERR(vm->scratch_pt)) {
                free_scratch_page(dev, vm->scratch_page);
                return PTR_ERR(vm->scratch_pt);
        }

        vm->scratch_pd = alloc_pd(dev);
        if (IS_ERR(vm->scratch_pd)) {
                free_pt(dev, vm->scratch_pt);
                free_scratch_page(dev, vm->scratch_page);
                return PTR_ERR(vm->scratch_pd);
        }

        if (USES_FULL_48BIT_PPGTT(dev)) {
                vm->scratch_pdp = alloc_pdp(dev);
                if (IS_ERR(vm->scratch_pdp)) {
                        free_pd(dev, vm->scratch_pd);
                        free_pt(dev, vm->scratch_pt);
                        free_scratch_page(dev, vm->scratch_page);
                        return PTR_ERR(vm->scratch_pdp);
                }
        }

        gen8_initialize_pt(vm, vm->scratch_pt);
        gen8_initialize_pd(vm, vm->scratch_pd);
        if (USES_FULL_48BIT_PPGTT(dev))
                gen8_initialize_pdp(vm, vm->scratch_pdp);

        return 0;
}
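
/*
 * When running virtualized, write the location of the PPGTT's top-level
 * structure(s) into the paravirtual interface registers and notify the
 * host that a PPGTT has been created or destroyed.
 */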
899 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) |
901 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) |
900 | { |
902 | { |
901 | enum vgt_g2v_type msg; |
903 | enum vgt_g2v_type msg; |
902 | struct drm_device *dev = ppgtt->base.dev; |
904 | struct drm_device *dev = ppgtt->base.dev; |
903 | struct drm_i915_private *dev_priv = dev->dev_private; |
905 | struct drm_i915_private *dev_priv = dev->dev_private; |
904 | int i; |
906 | int i; |
905 | 907 | ||
906 | if (USES_FULL_48BIT_PPGTT(dev)) { |
908 | if (USES_FULL_48BIT_PPGTT(dev)) { |
907 | u64 daddr = px_dma(&ppgtt->pml4); |
909 | u64 daddr = px_dma(&ppgtt->pml4); |
908 | 910 | ||
909 | I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); |
911 | I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); |
910 | I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); |
912 | I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); |
911 | 913 | ||
912 | msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : |
914 | msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : |
913 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); |
915 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); |
914 | } else { |
916 | } else { |
915 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) { |
917 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) { |
916 | u64 daddr = i915_page_dir_dma_addr(ppgtt, i); |
918 | u64 daddr = i915_page_dir_dma_addr(ppgtt, i); |
917 | 919 | ||
918 | I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); |
920 | I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); |
919 | I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); |
921 | I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); |
920 | } |
922 | } |
921 | 923 | ||
922 | msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : |
924 | msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : |
923 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); |
925 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); |
924 | } |
926 | } |
925 | 927 | ||
926 | I915_WRITE(vgtif_reg(g2v_notify), msg); |
928 | I915_WRITE(vgtif_reg(g2v_notify), msg); |
927 | 929 | ||
928 | return 0; |
930 | return 0; |
929 | } |
931 | } |
930 | 932 | ||
static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	if (USES_FULL_48BIT_PPGTT(dev))
		free_pdp(dev, vm->scratch_pdp);
	free_pd(dev, vm->scratch_pd);
	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
				    struct i915_page_directory_pointer *pdp)
{
	int i;

	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
		if (WARN_ON(!pdp->page_directory[i]))
			continue;

		gen8_free_page_tables(dev, pdp->page_directory[i]);
		free_pd(dev, pdp->page_directory[i]);
	}

	free_pdp(dev, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
}

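/* Teardown works top-down and mirrors init: a vGPU host is notified of
 * the destroy before anything is freed, then the 3- or 4-level hierarchy
 * is unwound, and the shared scratch structures are released last since
 * unused entries still point at them.
 */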
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	if (intel_vgpu_active(vm->dev))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);

	gen8_free_scratch(vm);
}

/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are
 * limited by the page directory boundary (instead of the page directory
 * pointer). That boundary is 1GB virtual. Therefore, unlike
 * gen8_ppgtt_alloc_page_directories(), it is possible, and likely, that the
 * caller will need to use multiple calls of this function to achieve the
 * appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_table *pt;
	uint32_t pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		/* Don't reallocate page tables */
		if (test_bit(pde, pd->used_pdes)) {
			/* Scratch is never allocated this way */
			WARN_ON(pt == vm->scratch_pt);
			continue;
		}

		pt = alloc_pt(dev);
		if (IS_ERR(pt))
			goto unwind_out;

		gen8_initialize_pt(vm, pt);
		pd->page_table[pde] = pt;
		__set_bit(pde, new_pts);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pde, new_pts, I915_PDES)
		free_pt(dev, pd->page_table[pde]);

	return -ENOMEM;
}

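/* A worked example for the allocator above, assuming 4KiB GTT pages: one
 * page table covers 512 PTEs * 4KiB = 2MiB of VA (GEN8_PDE_SHIFT == 21),
 * so a request with start = 1MiB and length = 4MiB touches PDEs 0..2 and
 * allocates at most three new page tables, fewer if some of those PDEs
 * were already populated.
 */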
/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm:	Master vm structure.
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);

	WARN_ON(!bitmap_empty(new_pds, pdpes));

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (test_bit(pdpe, pdp->used_pdpes))
			continue;

		pd = alloc_pd(dev);
		if (IS_ERR(pd))
			goto unwind_out;

		gen8_initialize_pd(vm, pd);
		pdp->page_directory[pdpe] = pd;
		__set_bit(pdpe, new_pds);
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pdpe, new_pds, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	return -ENOMEM;
}

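/* Similarly for page directories: each PD spans 512 PDEs * 2MiB = 1GiB
 * (GEN8_PDPE_SHIFT == 30), so e.g. a 3GiB range starting at 512MiB
 * crosses PDPEs 0..3 and can allocate up to four new page directories
 * within a single PDP.
 */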
/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev);
			if (IS_ERR(pdp))
				goto unwind_out;

			gen8_initialize_pdp(vm, pdp);
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev, pml4->pdps[pml4e]);

	return -ENOMEM;
}

static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page table bitmaps.
 * Both are sized by the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
					 unsigned long **new_pts,
					 uint32_t pdpes)
{
	unsigned long *pds;
	unsigned long *pts;

	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
	if (!pds)
		return -ENOMEM;

	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
	free_gen8_temp_bitmaps(pds, pts);
	return -ENOMEM;
}

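/* Note on layout: new_pts is effectively a 2D bitmap, one I915_PDES-bit
 * row per possible PDPE, flattened into a single allocation; callers
 * index a row as new_pts + pdpe * BITS_TO_LONGS(I915_PDES).
 */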
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}

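/* The VA-range allocator below runs in two phases: first every missing
 * page directory and page table for [start, start + length) is
 * allocated, with the new entries recorded in the temporary bitmaps so a
 * mid-way failure can be unwound completely; only once everything has
 * succeeded are the used_* bitmaps and the hardware PDEs written.
 */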
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* Wrap is never okay since we can only represent 48b, and we don't
	 * actually use the other side of the canonical address space.
	 */
	if (WARN_ON(start + length < start))
		return -ENODEV;

	if (WARN_ON(start + length > vm->total))
		return -ENODEV;

	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Do the allocations first so we can easily bail out */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	if (ret) {
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
		if (ret)
			goto err_out;
	}

	start = orig_start;
	length = orig_length;

	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		gen8_pde_t *const page_directory = kmap_px(pd);
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		/* Every pd should be allocated; we just did that above. */
		WARN_ON(!pd);

		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
			__set_bit(pde, pd->used_pdes);

			/* Map the PDE to the page table */
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
		}

		kunmap_px(ppgtt, page_directory);
		__set_bit(pdpe, pdp->used_pdpes);
		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
	}

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return 0;

err_out:
	while (pdpe--) {
		unsigned long temp;

		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
	}

	for_each_set_bit(pdpe, new_page_dirs, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	if (USES_FULL_48BIT_PPGTT(vm->dev))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

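/* The debugfs dumpers below print PTEs in groups of four per line and
 * skip any group consisting entirely of scratch entries, which keeps the
 * output manageable for sparsely-populated address spaces.
 */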
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			uint32_t pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, true);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_device *dev = ppgtt->base.dev;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* We allocate temp bitmaps for the page tables for no gain, but as
	 * this is for init only, let's keep things simple.
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of legacy 32b
 * address space.
 */
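/* A quick size check of the two geometries set up below, assuming 4KiB
 * pages: 4-level mode exposes 512 PML4Es * 512GiB = 1ULL << 48 (256TiB)
 * of VA, while legacy 3-level mode exposes 4 PDPEs * 1GiB = 1ULL << 32
 * (4GiB), matching the ppgtt->base.total assignments in gen8_ppgtt_init().
 */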
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;

	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->base.total = 1ULL << 48;
		ppgtt->switch_mm = gen8_48b_mm_switch;
	} else {
		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
		ppgtt->switch_mm = gen8_legacy_mm_switch;
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);

		if (intel_vgpu_active(ppgtt->base.dev)) {
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
	}

	if (intel_vgpu_active(ppgtt->base.dev))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	uint32_t pte, pde, temp;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_px(ppgtt, pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory *pd,
			   const int pde, struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;

	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
	pd_entry |= GEN6_PDE_VALID;

	writel(pd_entry, ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd,
				  uint32_t start, uint32_t length)
{
	struct i915_page_table *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, pd, start, length, temp, pde)
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(dev_priv->gtt.gsm);
}

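/* get_pd_offset() below packs the page directory's GGTT offset, counted
 * in 64-byte cachelines, into bits 31:16 of the value programmed into
 * RING_PP_DIR_BASE; the BUG_ON enforces the cacheline alignment that the
 * division by 64 assumes.
 */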
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);

	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

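/* When running as a vGPU guest, the mm switch is done with plain MMIO
 * writes instead of LRI commands emitted from the ring; the writes are
 * trapped and handled by the hypervisor, which is presumably also why no
 * explicit TLB flush is requested here.
 */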
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}

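/* Enabling PPGTT is per-ring from gen7 onwards: RING_MODE_GEN7 carries a
 * masked GFX_PPGTT_ENABLE bit, and on gen8+ an extra bit selects 48b
 * (4-level) page-table walks. Gen6 uses the single global GFX_MODE
 * register instead.
 */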
static void gen8_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int j;

	for_each_ring(ring, dev_priv, j) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
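/* Clearing a range does not free the page tables; the affected PTEs are
 * simply redirected to the scratch page so that stray GPU accesses land
 * somewhere harmless.
 */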
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_px(ppgtt, pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

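/* Walk the sg list one page at a time, writing a PTE per page and remapping
 * whenever the walk crosses into the next page table.
 */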
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true, flags);

		if (++act_pte == GEN6_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start_in, uint64_t length_in)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_device *dev = vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_table *pt;
	uint32_t start, length, start_save, length_save;
	uint32_t pde, temp;
	int ret;

	if (WARN_ON(start_in + length_in > ppgtt->base.total))
		return -ENODEV;

	start = start_save = start_in;
	length = length_save = length_in;

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks the PTEs in use within the
	 * page tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt(dev);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		__set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (__test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
						gen6_pte_index(start),
						gen6_pte_count(start, length),
						GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
			  GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure the write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs. */
	readl(dev_priv->gtt.gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(vm->dev, pt);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_table *pt;
	uint32_t pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, ppgtt, pde) {
		if (pt != vm->scratch_pt)
			free_pt(ppgtt->base.dev, pt);
	}

	gen6_free_scratch(vm);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, dev_priv->gtt.base.total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE,
					       0, dev_priv->gtt.base.total,
					       0);
		if (ret)
			goto err_out;

		retried = true;
		goto alloc;
	}

	if (ret)
		goto err_out;

	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  uint64_t start, uint64_t length)
{
	struct i915_page_table *unused;
	uint32_t pde, temp;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	if (IS_GEN6(dev)) {
		ppgtt->switch_mm = gen6_mm_switch;
	} else if (IS_HASWELL(dev)) {
		ppgtt->switch_mm = hsw_mm_switch;
	} else if (IS_GEN7(dev)) {
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	if (intel_vgpu_active(dev))
		ppgtt->switch_mm = vgpu_mm_switch;

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);

	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}

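/* Common setup for any new address space: initialise its drm_mm range
 * allocator and VMA lists, and link it into the global VM list.
 */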
static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv)
{
	drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

static void gtt_write_workarounds(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_SKYLAKE(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_BROXTON(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	ret = __hw_ppgtt_init(dev, ppgtt);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		i915_address_space_init(&ppgtt->base, dev_priv);
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_device *dev)
{
	gtt_write_workarounds(dev);

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself. We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev))
		return 0;

	if (IS_GEN6(dev))
		gen6_ppgtt_enable(dev);
	else if (IS_GEN7(dev))
		gen7_ppgtt_enable(dev);
	else if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_enable(dev);
	else
		MISSING_CASE(INTEL_INFO(dev)->gen);

	return 0;
}

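/* With legacy ring submission the aliasing PPGTT has to be selected
 * explicitly on each ring; under execlists the context descriptor takes
 * care of this instead.
 */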
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (i915.enable_execlists)
		return 0;

	if (!ppgtt)
		return 0;

	return ppgtt->switch_mm(ppgtt, req);
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->file_priv = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));

	list_del(&ppgtt->base.global_link);
	drm_mm_takedown(&ppgtt->base.mm);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

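/* do_idling() and undo_idling() bracket GTT unmaps on the VT-d-affected
 * configurations above: the GPU is forcibly idled (non-interruptibly) and
 * the previous interruptible state is returned for undo_idling() to restore.
 */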
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

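/* Report and clear any valid fault left in each engine's RING_FAULT_REG so
 * that stale faults are not left behind.
 */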
void i915_check_and_clear_faults(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(ring),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}
	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

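/* Flush GGTT writes: through the chipset on pre-gen6 parts, via the
 * GFX_FLSH_CNTL register (plus a posting read) on gen6+.
 */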
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
		intel_gtt_chipset_flush();
	} else {
		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
		POSTING_READ(GFX_FLSH_CNTL_GEN6);
	}
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	i915_ggtt_flush(dev_priv);
}

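/* Map the object's backing store for DMA; dma_map_sg() returning zero mapped
 * entries is reported to the caller as -ENOSPC.
 */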
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

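/* Gen8 PTEs are 64 bits wide. Where the architecture lacks writeq(), the
 * entry is written as two 32-bit halves, so the update is not atomic as
 * seen by the hardware; callers follow up with a posting read and TLB flush.
 */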
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_pte_t __iomem *gtt_entries =
		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0; /* shut up gcc */
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));
		i++;
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different (NUMA-like) access patterns. Therefore, even with the way
	 * we assume hardware should work, we must keep this posting read for
	 * paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1])
			!= gen8_pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

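/* Bundle of gen8_ggtt_insert_entries() arguments, so the call can be passed
 * through the single-pointer stop_machine() callback below.
 */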
struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;
	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);
	return 0;
}

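/* Perform the PTE update under stop_machine(), serialising it against all
 * other CPU activity; presumably some platforms need GGTT updates isolated
 * from concurrent access through the GGTT (hence the tongue-in-cheek "BKL"
 * suffix).
 */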
static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };
	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_pte_t __iomem *gtt_entries =
		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different (NUMA-like) access patterns. Therefore, even with the way
	 * we assume hardware should work, we must keep this posting read for
	 * paranoia.
	 */
	if (i != 0) {
		unsigned long gtt = readl(&gtt_entries[i-1]);
		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
	}

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_clear_range(first_entry, num_entries);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
				vma->node.start,
				cache_level, pte_flags);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->bound |= GLOBAL_BIND | LOCAL_BIND;

	return 0;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	struct sg_table *pages = obj->pages;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;
	pages = vma->ggtt_view.pages;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & GLOBAL_BIND) {
		vma->vm->insert_entries(vma->vm, pages,
					vma->node.start,
					cache_level, pte_flags);
	}

	if (flags & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base, pages,
					    vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}

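/*
 * With an aliasing PPGTT a single drm_mm node backs both mappings:
 * GLOBAL_BIND writes the GGTT PTEs, while LOCAL_BIND mirrors the same pages
 * into the aliasing PPGTT at the identical offset (vma->node.start), so a
 * caller may request either flag or both.
 */
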
static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	const uint64_t size = min_t(uint64_t,
				    obj->base.size,
				    vma->node.size);

	if (vma->bound & GLOBAL_BIND) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     size,
				     true);
	}

	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;

		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 size,
					 true);
	}
}

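/*
 * The min_t() above caps the clear at both the object size and the node
 * size; the two can legitimately differ, e.g. for GGTT views (such as the
 * rotated views built further down in this file) whose GTT footprint does
 * not match obj->base.size.
 */
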
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
		     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

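/*
 * On machines without an LLC (see the HAS_LLC() check below in
 * i915_gem_setup_global_gtt()) objects of different cache levels must not
 * share a GTT page boundary, so drm_mm "colors" nodes by cache level.  The
 * adjustment above shrinks a candidate hole by one 4 KiB page on either
 * side whenever the neighbouring node has a different color, leaving a
 * scratch-backed guard PTE between the two objects.
 */
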
static int i915_gem_setup_global_gtt(struct drm_device *dev,
				     u64 start,
				     u64 mappable_end,
				     u64 end)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;
	int ret;

	BUG_ON(mappable_end > end);

	ggtt_vm->start = start;

	/* Subtract the guard page before address space initialization to
	 * shrink the range used by drm_mm */
	ggtt_vm->total = end - start - PAGE_SIZE;
	i915_address_space_init(ggtt_vm, dev_priv);
	ggtt_vm->total += PAGE_SIZE;

	if (intel_vgpu_active(dev)) {
		ret = intel_vgt_balloon(dev);
		if (ret)
			return ret;
	}

	if (!HAS_LLC(dev))
		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
			return ret;
		}
		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
	}

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);

	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt)
			return -ENOMEM;

		ret = __hw_ppgtt_init(dev, ppgtt);
		if (ret) {
			ppgtt->base.cleanup(&ppgtt->base);
			kfree(ppgtt);
			return ret;
		}

		if (ppgtt->base.allocate_va_range)
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
		if (ret) {
			ppgtt->base.cleanup(&ppgtt->base);
			kfree(ppgtt);
			return ret;
		}

		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
					ppgtt->base.total,
					true);

		dev_priv->mm.aliasing_ppgtt = ppgtt;
		WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
		dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
	}

	return 0;
}

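/*
 * Note the guard-page arithmetic above: drm_mm is initialised over
 * [start, end - PAGE_SIZE) and ggtt_vm->total is only restored to the full
 * size afterwards, so the allocator can never hand out the final page.
 * That page is then cleared to scratch at the end of the function, which
 * is what keeps the prefetching described in the opening comment inside
 * harmless memory.
 */
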
void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

void i915_global_gtt_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
	}

	i915_gem_cleanup_stolen(dev);

	if (drm_mm_initialized(&vm->mm)) {
		if (intel_vgpu_active(dev))
			intel_vgt_deballoon();

		drm_mm_takedown(&vm->mm);
		list_del(&vm->global_link);
	}

	vm->cleanup(vm);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

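/*
 * Worked example of the decode above (illustrative): a GGMS field of 2
 * yields 2 << 20 = 2 MiB of PTE space.  gen6_gmch_probe() below converts
 * that to addressable space as (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT,
 * i.e. with 4-byte gen6 PTEs and 4 KiB pages a 2 MiB GTT maps 2 GiB of GGTT.
 */
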
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

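/*
 * Unlike gen6, the field here is a power-of-two exponent rather than a
 * direct count: e.g. a field value of 3 gives 1 << 3 = 8, i.e. 8 MiB of
 * PTEs, which at 8 bytes per gen8 PTE and 4 KiB pages maps a 4 GiB GGTT.
 * The CONFIG_X86_32 clamp to 4 MiB of PTEs is what caps the GGTT at 2 GiB
 * there, as the comment above says.
 */
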
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

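/*
 * Example decodes for the three ranges above: 0x10 -> 0x10 << 25 = 512 MiB
 * (32 MiB steps); 0x11 -> (0x11 - 0x11 + 2) << 22 = 8 MiB and 0x16 ->
 * 28 MiB (4 MiB steps from 8 MiB); 0x17 -> (0x17 - 0x17 + 9) << 22 =
 * 36 MiB (4 MiB steps from 36 MiB).
 */
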
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}

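/*
 * E.g. 0x02 decodes to 2 << 25 = 64 MiB, while 0xf0 decodes to
 * (0xf0 - 0xf0 + 1) << 22 = 4 MiB, matching the comment above.
 */
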
static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_page_scratch *scratch_page;
	phys_addr_t gtt_phys_addr;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_BROXTON(dev))
		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
	else
		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(scratch_page)) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
		return PTR_ERR(scratch_page);
	}

	dev_priv->gtt.base.scratch_page = scratch_page;

	return 0;
}

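/*
 * The scratch page allocated here is what the clear_range() callbacks point
 * every unbound GGTT entry at, so stray reads and hardware prefetches past
 * an object land on harmless memory instead of faulting.
 */
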
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv->dev))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

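/*
 * Each GEN8_PPAT(index, value) above packs a one-byte PAT entry into the
 * 64-bit word at byte position "index"; the low and high halves of that
 * word are then written to the _LO and _HI registers separately (note the
 * "pat >> 32"), since, per the XXX comment, it is unclear whether a single
 * 64-bit write would be honoured.
 */
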
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

static int gen8_gmch_probe(struct drm_device *dev,
			   u64 *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   u64 *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_INFO(dev)->gen >= 9) {
		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev)) {
		*stolen = chv_get_stolen_size(snb_gmch_ctl);
		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;

	if (IS_CHERRYVIEW(dev_priv))
		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;

	return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   u64 *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   u64 *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%llx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	iounmap(gtt->gsm);
	free_scratch_page(vm->dev, vm->scratch_page);
}

static int i915_gmch_probe(struct drm_device *dev,
			   u64 *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   u64 *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;

	if (unlikely(dev_priv->gtt.do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

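/*
 * Note the inverted error check above: intel_gmch_probe() follows the old
 * AGP convention of returning nonzero on success, so !ret is the failure
 * path and is translated into -EIO.
 */
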
static void i915_gmch_remove(struct i915_address_space *vm)
{
//	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	} else {
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
	}

	gtt->base.dev = dev;
	gtt->base.is_ggtt = true;

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto out_gtt_cleanup;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif
	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	return 0;

out_gtt_cleanup:
	gtt->base.cleanup(&dev_priv->gtt.base);

	return ret;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool flush;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	/* Cache flush objects bound into GGTT and rebind them. */
	vm = &dev_priv->gtt.base;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		flush = false;
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != vm)
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));

			flush = true;
		}

		if (flush)
			i915_gem_clflush_object(obj, obj->pin_display);
	}

	if (INTEL_INFO(dev)->gen >= 8) {
		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev)) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt =
					container_of(vm, struct i915_hw_ppgtt,
						     base);

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_flush(dev_priv);
}

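/*
 * The function above runs on resume, when the GGTT contents must be treated
 * as stale: the whole range is pointed back at scratch, every VMA bound in
 * the GGTT is rebound with PIN_UPDATE (with a clflush where needed), and
 * the PPAT or PPGTT page tables are rewritten before the final GGTT flush.
 */
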
static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj,
		      struct i915_address_space *vm,
		      const struct i915_ggtt_view *ggtt_view)
{
	struct i915_vma *vma;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return ERR_PTR(-EINVAL);

//	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vm_link);
	INIT_LIST_HEAD(&vma->obj_link);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->is_ggtt = i915_is_ggtt(vm);

	if (i915_is_ggtt(vm))
		vma->ggtt_view = *ggtt_view;
	else
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));

	list_add_tail(&vma->obj_link, &obj->vma_list);

	return vma;
}

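/*
 * A VMA here is one binding of an object into one address space, so an
 * object can appear in several VMAs at once (the GGTT plus any number of
 * PPGTTs).  Only GGTT VMAs carry a ggtt_view, which is why the WARN_ON
 * above insists the two always come paired.
 */
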
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm,
					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	if (WARN_ON(!view))
		return ERR_PTR(-EINVAL);

	vma = i915_gem_obj_to_ggtt_view(obj, view);

	if (IS_ERR(vma))
		return vma;

	if (!vma)
		vma = __i915_gem_vma_create(obj, ggtt, view);

	return vma;
}

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	if (!sg) {
		st->nents = 0;
		sg = st->sgl;
	}

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}

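/*
 * The walk above emits pages column by column, bottom row first.  As a
 * small illustration, for width = height = stride = 2 over row-major
 * source pages {0, 1, 2, 3} the output order is in[2], in[0], in[3],
 * in[1] -- the page-granular 90 degree rotation used for rotated scanout.
 */
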
static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
	unsigned int size_pages_uv;
	struct sg_page_iter sg_iter;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	unsigned int uv_start_page;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
				       sizeof(dma_addr_t));
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Account for UV plane with NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12)
		size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
	else
		size_pages_uv = 0;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
		i++;
	}

	/* Rotate the pages. */
	sg = rotate_pages(page_addr_list, 0,
			  rot_info->width_pages, rot_info->height_pages,
			  rot_info->width_pages,
			  st, NULL);

	/* Append the UV plane if NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
		uv_start_page = size_pages;

		/* Check for tile-row un-alignment. */
		if (offset_in_page(rot_info->uv_offset))
			uv_start_page--;

		rot_info->uv_start_page = uv_start_page;

		rotate_pages(page_addr_list, uv_start_page,
			     rot_info->width_pages_uv,
			     rot_info->height_pages_uv,
			     rot_info->width_pages_uv,
			     st, sg);
	}

	DRM_DEBUG_KMS(
		"Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
		obj->base.size, rot_info->pitch, rot_info->height,
		rot_info->pixel_format, rot_info->width_pages,
		rot_info->height_pages, size_pages + size_pages_uv,
		size_pages);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS(
		"Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
		obj->base.size, ret, rot_info->pitch, rot_info->height,
		rot_info->pixel_format, rot_info->width_pages,
		rot_info->height_pages, size_pages + size_pages_uv,
		size_pages);
	return ERR_PTR(ret);
}

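/*
 * Hedged sketch of the NV12 accounting above, not part of the driver: the
 * CbCr (UV) plane follows the Y plane in the object at rot_info->uv_offset,
 * and its pages are appended to the same sg_table starting at uv_start_page.
 * The helper name example_uv_start_page() is hypothetical.
 */
#if 0
static unsigned int example_uv_start_page(const struct intel_rotation_info *rot_info)
{
	/* UV pages start right after the Y-plane pages... */
	unsigned int uv_start_page = rot_info->size >> PAGE_SHIFT;

	/* ...unless uv_offset is not tile-row aligned, in which case the
	 * mapping starts one page early, exactly as done above.
	 */
	if (offset_in_page(rot_info->uv_offset))
		uv_start_page--;

	return uv_start_page;
}
#endif
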
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter obj_sg_iter;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	sg = st->sgl;
	st->nents = 0;
	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
			 view->params.partial.offset)
	{
		if (st->nents >= view->params.partial.size)
			break;

		sg_set_page(sg, NULL, PAGE_SIZE, 0);
		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
		sg_dma_len(sg) = PAGE_SIZE;

		sg = sg_next(sg);
		st->nents++;
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

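/*
 * Usage sketch, illustrative only: a partial view describes a window of
 * view->params.partial.size pages starting at page offset
 * view->params.partial.offset of the backing object. The helper name
 * example_partial_view() is hypothetical; field names match the code above.
 */
#if 0
static void example_partial_view(void)
{
	struct i915_ggtt_view view;

	memset(&view, 0, sizeof(view));
	view.type = I915_GGTT_VIEW_PARTIAL;
	view.params.partial.offset = 4;	/* first page of the window */
	view.params.partial.size = 16;	/* window length in pages */
	/* intel_partial_pages() would emit exactly 16 PAGE_SIZE entries. */
}
#endif
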
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated,
						  vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->ggtt_view.pages =
			intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->ggtt_view.pages)) {
		ret = PTR_ERR(vma->ggtt_view.pages);
		vma->ggtt_view.pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}

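/*
 * Note on the error convention above, illustrative only: the view helpers
 * return a valid sg_table, NULL (view type not implemented) or an ERR_PTR
 * value, and i915_get_ggtt_vma_pages() folds all three into 0/-errno. A
 * hypothetical condensed form:
 */
#if 0
static int example_pages_to_errno(struct sg_table *pages)
{
	if (!pages)
		return -EINVAL;		/* unimplemented view type */
	if (IS_ERR(pages))
		return PTR_ERR(pages);	/* allocation or setup failure */
	return 0;			/* usable page table */
}
#endif
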
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	int ret;
	u32 bind_flags;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= LOCAL_BIND;

	if (flags & PIN_UPDATE)
		bind_flags |= vma->bound;
	else
		bind_flags &= ~vma->bound;

	if (bind_flags == 0)
		return 0;

	if (vma->bound == 0 && vma->vm->allocate_va_range) {
		/* XXX: i915_vma_pin() will fix this +- hack */
		vma->pin_count++;
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		vma->pin_count--;
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->bound |= bind_flags;

	return 0;
}

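/*
 * Worked example for the bind_flags logic above, illustrative only: with
 * vma->bound == GLOBAL_BIND and flags == PIN_GLOBAL | PIN_USER, the
 * already-present GLOBAL_BIND is masked out and only LOCAL_BIND reaches
 * bind_vma(); passing PIN_UPDATE instead forces existing bindings to be
 * rewritten. The helper name example_bind_flags() is made up.
 */
#if 0
static u32 example_bind_flags(u32 flags, u32 bound)
{
	u32 bind_flags = 0;

	if (flags & PIN_GLOBAL)
		bind_flags |= GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= LOCAL_BIND;

	if (flags & PIN_UPDATE)
		bind_flags |= bound;	/* redo existing bindings too */
	else
		bind_flags &= ~bound;	/* bind only what is missing */

	return bind_flags;
}
#endif
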
/**
 * i915_ggtt_view_size - Get the size of a GGTT view.
 * @obj: Object the view is of.
 * @view: The view in question.
 *
 * Return: The size of the GGTT view in bytes.
 */
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view)
{
	if (view->type == I915_GGTT_VIEW_NORMAL) {
		return obj->base.size;
	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
		return view->params.rotated.size;
	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
		return view->params.partial.size << PAGE_SHIFT;
	} else {
		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
		return obj->base.size;
	}
}
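
/*
 * Usage sketch, illustrative only: for the partial case the window length
 * is converted from pages to bytes, so a 16-page partial view reports
 * 16 << PAGE_SHIFT bytes regardless of obj->base.size. The helper name
 * example_partial_view_size() is hypothetical.
 */
#if 0
static size_t example_partial_view_size(struct drm_i915_gem_object *obj)
{
	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_PARTIAL };

	view.params.partial.size = 16;	/* pages */

	return i915_ggtt_view_size(obj, &view);	/* 16 << PAGE_SHIFT */
}
#endif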