/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);

bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
	if (i915.enable_ppgtt == 0)
		return false;

	if (i915.enable_ppgtt == 1 && full)
		return false;

	return true;
}

static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && HAS_PPGTT(dev))
		return 2;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
	    dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
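
/*
 * Editor's note (sketch, not from the original source): taken together,
 * the two helpers above give the i915.enable_ppgtt module parameter these
 * sanitized meanings:
 *
 *	0 - PPGTT disabled (GGTT only)
 *	1 - aliasing PPGTT, shadowing the GGTT
 *	2 - full PPGTT, when HAS_PPGTT(dev); e.g. sanitize_enable_ppgtt(dev, 2)
 *	    returns 2 only if no VT-d or early-VLV quirk forces it back to 0
 */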

static void ppgtt_bind_vma(struct i915_vma *vma,
			   enum i915_cache_level cache_level,
			   u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);

static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
					     enum i915_cache_level level,
					     bool valid)
{
	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}
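
/*
 * Worked example (editor's sketch): a valid, uncached 4KiB page at DMA
 * address 0x100000 encodes as
 *
 *	gen8_pte_encode(0x100000, I915_CACHE_NONE, true)
 *	    == 0x100000 | _PAGE_PRESENT | _PAGE_RW | PPAT_UNCACHED_INDEX
 *
 * i.e. the address is OR'd with the present/write bits and a PPAT index
 * selecting cacheability from the PPAT set up by *_setup_private_ppat().
 */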

static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
					       dma_addr_t addr,
					       enum i915_cache_level level)
{
	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 flags)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
			  uint64_t val, bool synchronous)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	BUG_ON(entry >= 4);

	if (synchronous) {
		I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
		I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
		return 0;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, (u32)(val >> 32));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, (u32)(val));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring,
			  bool synchronous)
{
	int i, ret;

	/* bit of a hack to find the actual last used pd */
	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;

	for (i = used_pd - 1; i >= 0; i--) {
		dma_addr_t addr = ppgtt->pd_dma_addr[i];
		ret = gen8_write_pdp(ring, i, addr, synchronous);
		if (ret)
			return ret;
	}

	return 0;
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096);
	if (pt_vaddr == NULL)
		return;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES_PER_PAGE)
			last_pte = GEN8_PTES_PER_PAGE;

		MapPage(pt_vaddr, (addr_t)page_table, PG_SW);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);

		pte = 0;
		if (++pde == GEN8_PDES_PER_PAGE) {
			pdpe++;
			pde = 0;
		}
	}
	FreeKernelSpace(pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 unused)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	struct sg_page_iter sg_iter;

	pt_vaddr = AllocKernelSpace(4096);
	if (pt_vaddr == NULL)
		return;

	MapPage(pt_vaddr, (addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);

	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
			break;

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES_PER_PAGE) {
			if (!HAS_LLC(ppgtt->base.dev))
				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
			if (++pde == GEN8_PDES_PER_PAGE) {
				pdpe++;
				pde = 0;
			}
			pte = 0;
			MapPage(pt_vaddr, (addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
		}
	}
	FreeKernelSpace(pt_vaddr);
}

static void gen8_free_page_tables(struct page **pt_pages)
{
	int i;

	if (pt_pages == NULL)
		return;

//	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
//		if (pt_pages[i])
//			__free_pages(pt_pages[i], 0);
}

static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	}

//	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}

static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		/* TODO: In the future we'll support sparse mappings, so this
		 * will have to change. */
		if (!ppgtt->pd_dma_addr[i])
			continue;

		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			if (addr)
				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
	}
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&vm->mm);

	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
}

static struct page **__gen8_alloc_page_tables(void)
{
	struct page **pt_pages;
	int i;

	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
	if (!pt_pages)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
		pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!pt_pages[i])
			goto bail;
	}

	return pt_pages;

bail:
	gen8_free_page_tables(pt_pages);
	kfree(pt_pages);
	return ERR_PTR(-ENOMEM);
}

static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
					   const int max_pdp)
{
	struct page **pt_pages[GEN8_LEGACY_PDPS];
	int i, ret;

	for (i = 0; i < max_pdp; i++) {
		pt_pages[i] = __gen8_alloc_page_tables();
		if (IS_ERR(pt_pages[i])) {
			ret = PTR_ERR(pt_pages[i]);
			goto unwind_out;
		}
	}

	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
	 * "atomic" - for cleanup purposes.
	 */
	for (i = 0; i < max_pdp; i++)
		ppgtt->gen8_pt_pages[i] = pt_pages[i];

	return 0;

unwind_out:
	while (i--) {
		gen8_free_page_tables(pt_pages[i]);
		kfree(pt_pages[i]);
	}

	return ret;
}

static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
						     sizeof(dma_addr_t),
						     GFP_KERNEL);
		if (!ppgtt->gen8_pt_dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}

static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
						const int max_pdp)
{
//	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
	if (!ppgtt->pd_pages)
		return -ENOMEM;

//	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);

	return 0;
}

static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	int ret;

	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
	if (ret) {
//		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
		return ret;
	}

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		gen8_ppgtt_free(ppgtt);

	return ret;
}

static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
					     const int pd)
{
	dma_addr_t pd_addr;
	int ret;

	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
			       &ppgtt->pd_pages[pd], 0,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

//	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
//	if (ret)
//		return ret;

	ppgtt->pd_dma_addr[pd] = pd_addr;

	return 0;
}

static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
					const int pd,
					const int pt)
{
	dma_addr_t pt_addr;
	struct page *p;
	int ret;

	p = ppgtt->gen8_pt_pages[pd][pt];
	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
//	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
//	if (ret)
//		return ret;

	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;

	return 0;
}

/**
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 * FIXME: split allocation into smaller pieces. For now we only ever do this
 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 * TODO: Do something with the size parameter
 */
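/*
 * Illustrative decomposition (editor's sketch, using the shifts and masks
 * from the gen8 helpers above): a legacy 32b PPGTT offset splits as
 *
 *	pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;  // which 1GB PDP
 *	pde  = start >> GEN8_PDE_SHIFT  & GEN8_PDE_MASK;   // which 2MB page table
 *	pte  = start >> GEN8_PTE_SHIFT  & GEN8_PTE_MASK;   // which 4KB page
 *
 * giving 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes = 4GB, as noted above.
 */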
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	int i, j, ret;
	gen8_ppgtt_pde_t *pd_vaddr;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* 1. Do all our allocations for page directories and page tables. */
	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
	if (ret)
		return ret;

	/*
	 * 2. Create DMA mappings for the page directories and page tables.
	 */
	for (i = 0; i < max_pdp; i++) {
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		if (ret)
			goto bail;

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}
	}

	/*
	 * 3. Map all the page directory entries to point to the page tables
	 * we've allocated.
	 *
	 * For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again.
	 */

	pd_vaddr = AllocKernelSpace(4096);

	for (i = 0; i < max_pdp; i++) {
		MapPage(pd_vaddr, (addr_t)(&ppgtt->pd_pages[i]), 3);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
						      I915_CACHE_LLC);
		}
		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
	}
	FreeKernelSpace(pd_vaddr);

	ppgtt->enable = gen8_ppgtt_enable;
	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 ppgtt->num_pd_entries,
			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
	return 0;

bail:
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
	return ret;
}

static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd_offset & 0x3f);

	return (ppgtt->pd_offset / 64) << 16;
}
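
/*
 * Example (editor's sketch): pd_offset is a byte offset into the GGTT and
 * must be 64-byte aligned; the ring register takes it in 64B cachelines in
 * the upper halfword, so a directory at GTT offset 0x10000 is programmed
 * as (0x10000 / 64) << 16 via RING_PP_DIR_BASE.
 */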

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct intel_engine_cs *ring,
			 bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* If we're in reset, we can assume the GPU is sufficiently idle to
	 * manually frob these bits. Ideally we could use the ring functions,
	 * except our error handling makes it quite difficult (can't use
	 * intel_ring_begin, ring->flush, or intel_ring_advance)
	 *
	 * FIXME: We should try not to special case reset
	 */
	if (synchronous ||
	    i915_reset_in_progress(&dev_priv->gpu_error)) {
		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
		POSTING_READ(RING_PP_DIR_BASE(ring));
		return 0;
	}

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring,
			  bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* If we're in reset, we can assume the GPU is sufficiently idle to
	 * manually frob these bits. Ideally we could use the ring functions,
	 * except our error handling makes it quite difficult (can't use
	 * intel_ring_begin, ring->flush, or intel_ring_advance)
	 *
	 * FIXME: We should try not to special case reset
	 */
	if (synchronous ||
	    i915_reset_in_progress(&dev_priv->gpu_error)) {
		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
		POSTING_READ(RING_PP_DIR_BASE(ring));
		return 0;
	}

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring,
			  bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!synchronous)
		return 0;

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}

static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int j, ret;

	for_each_ring(ring, dev_priv, j) {
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		/* We promise to do a switch later with FULL PPGTT. If this is
		 * aliasing, this is the one and only switch we'll do */
		if (USES_FULL_PPGTT(dev))
			continue;

		ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			goto err_out;
	}

	return 0;

err_out:
	for_each_ring(ring, dev_priv, j)
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
	return ret;
}

static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		int ret;
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		/* We promise to do a switch later with FULL PPGTT. If this is
		 * aliasing, this is the one and only switch we'll do */
		if (USES_FULL_PPGTT(dev))
			continue;

		ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t ecochk, gab_ctl, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

	for_each_ring(ring, dev_priv, i) {
		int ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			return ret;
	}

	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

	pt_vaddr = AllocKernelSpace(4096);

	if (pt_vaddr == NULL)
		return;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		MapPage(pt_vaddr, (addr_t)(ppgtt->pt_pages[act_pt]), 3);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}

	FreeKernelSpace(pt_vaddr);
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = AllocKernelSpace(4096);

	if (pt_vaddr == NULL)
		return;

	MapPage(pt_vaddr, (addr_t)(ppgtt->pt_pages[act_pt]), 3);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true, flags);

		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			act_pt++;
			MapPage(pt_vaddr, (addr_t)(ppgtt->pt_pages[act_pt]), 3);
			act_pte = 0;
		}
	}
	FreeKernelSpace(pt_vaddr);
}

static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
}

static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&ppgtt->base.mm);
	drm_mm_remove_node(&ppgtt->node);

	gen6_ppgtt_unmap_pages(ppgtt);
	gen6_ppgtt_free(ppgtt);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
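	/*
	 * Editor's note (sketch): assuming GEN6_PD_SIZE is
	 * GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE (512 pages, i.e. 2MB of GGTT
	 * address space), DRM_MM_TOPDOWN below places the directory at the
	 * top of the GTT so it does not fragment the space used for
	 * ordinary buffer objects.
	 */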
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, dev_priv->gtt.base.total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE,
					       0, dev_priv->gtt.base.total,
					       0);
		if (ret)
			return ret;

		retried = true;
		goto alloc;
	}

	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	return ret;
}

static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
				  GFP_KERNEL);

	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i]) {
			gen6_ppgtt_free(ppgtt);
			return -ENOMEM;
		}
	}

	return 0;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen6_ppgtt_allocate_page_directories(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
	if (ret) {
		drm_mm_remove_node(&ppgtt->node);
		return ret;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr) {
		drm_mm_remove_node(&ppgtt->node);
		gen6_ppgtt_free(ppgtt);
		return -ENOMEM;
	}

	return 0;
}

static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

//		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
//			gen6_ppgtt_unmap_pages(ppgtt);
//			return -EIO;
//		}

		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	return 0;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	if (IS_GEN6(dev)) {
		ppgtt->enable = gen6_ppgtt_enable;
		ppgtt->switch_mm = gen6_mm_switch;
	} else if (IS_HASWELL(dev)) {
		ppgtt->enable = gen7_ppgtt_enable;
		ppgtt->switch_mm = hsw_mm_switch;
	} else if (IS_GEN7(dev)) {
		ppgtt->enable = gen7_ppgtt_enable;
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_setup_page_tables(ppgtt);
	if (ret) {
		gen6_ppgtt_free(ppgtt);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
//	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	return 0;
}

int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	ppgtt->base.dev = dev;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else if (IS_GEN8(dev))
		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
	else
		BUG();

	if (!ret) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		kref_init(&ppgtt->ref);
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
		i915_init_vm(dev_priv, &ppgtt->base);
		if (INTEL_INFO(dev)->gen < 8) {
			gen6_write_pdes(ppgtt);
			DRM_DEBUG("Adding PPGTT at offset %x\n",
				  ppgtt->pd_offset << 10);
		}
	}

	return ret;
}

static void
ppgtt_bind_vma(struct i915_vma *vma,
	       enum i915_cache_level cache_level,
	       u32 flags)
{
	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, flags);
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

1242 | |||
4280 | Serge | 1243 | void i915_check_and_clear_faults(struct drm_device *dev) |
1244 | { |
||
1245 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
5060 | serge | 1246 | struct intel_engine_cs *ring; |
4280 | Serge | 1247 | int i; |
1248 | |||
1249 | if (INTEL_INFO(dev)->gen < 6) |
||
1250 | return; |
||
1251 | |||
1252 | for_each_ring(ring, dev_priv, i) { |
||
1253 | u32 fault_reg; |
||
1254 | fault_reg = I915_READ(RING_FAULT_REG(ring)); |
||
1255 | if (fault_reg & RING_FAULT_VALID) { |
||
1256 | DRM_DEBUG_DRIVER("Unexpected fault\n" |
||
1257 | "\tAddr: 0x%08lx\\n" |
||
1258 | "\tAddress space: %s\n" |
||
1259 | "\tSource ID: %d\n" |
||
1260 | "\tType: %d\n", |
||
1261 | fault_reg & PAGE_MASK, |
||
1262 | fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", |
||
1263 | RING_FAULT_SRCID(fault_reg), |
||
1264 | RING_FAULT_FAULT_TYPE(fault_reg)); |
||
1265 | I915_WRITE(RING_FAULT_REG(ring), |
||
1266 | fault_reg & ~RING_FAULT_VALID); |
||
1267 | } |
||
1268 | } |
||
1269 | POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); |
||
1270 | } |
||
1271 | |||
1272 | void i915_gem_suspend_gtt_mappings(struct drm_device *dev) |
||
1273 | { |
||
1274 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1275 | |||
1276 | /* Don't bother messing with faults pre GEN6 as we have little |
||
1277 | * documentation supporting that it's a good idea. |
||
1278 | */ |
||
1279 | if (INTEL_INFO(dev)->gen < 6) |
||
1280 | return; |
||
1281 | |||
1282 | i915_check_and_clear_faults(dev); |
||
1283 | |||
1284 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
||
5060 | serge | 1285 | dev_priv->gtt.base.start, |
1286 | dev_priv->gtt.base.total, |
||
1287 | true); |
||
4280 | Serge | 1288 | } |
1289 | |||
2332 | Serge | 1290 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
1291 | { |
||
1292 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1293 | struct drm_i915_gem_object *obj; |
||
5060 | serge | 1294 | struct i915_address_space *vm; |
2332 | Serge | 1295 | |
4280 | Serge | 1296 | i915_check_and_clear_faults(dev); |
1297 | |||
2332 | Serge | 1298 | /* First fill our portion of the GTT with scratch pages */ |
4104 | Serge | 1299 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
5060 | serge | 1300 | dev_priv->gtt.base.start, |
1301 | dev_priv->gtt.base.total, |
||
4280 | Serge | 1302 | true); |
2332 | Serge | 1303 | |
4104 | Serge | 1304 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
5060 | serge | 1305 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, |
1306 | &dev_priv->gtt.base); |
||
1307 | if (!vma) |
||
1308 | continue; |
||
1309 | |||
4104 | Serge | 1310 | i915_gem_clflush_object(obj, obj->pin_display); |
5060 | serge | 1311 | /* The bind_vma code tries to be smart about tracking mappings. |
1312 | * Unfortunately above, we've just wiped out the mappings |
||
1313 | * without telling our object about it. So we need to fake it. |
||
1314 | */ |
||
1315 | obj->has_global_gtt_mapping = 0; |
||
1316 | vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); |
||
2332 | Serge | 1317 | } |
1318 | |||
5060 | serge | 1319 | |
1320 | if (INTEL_INFO(dev)->gen >= 8) { |
||
1321 | if (IS_CHERRYVIEW(dev)) |
||
1322 | chv_setup_private_ppat(dev_priv); |
||
1323 | else |
||
1324 | bdw_setup_private_ppat(dev_priv); |
||
1325 | |||
1326 | return; |
||
1327 | } |
||
1328 | |||
1329 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
||
1330 | /* TODO: Perhaps it shouldn't be gen6 specific */ |
||
1331 | if (i915_is_ggtt(vm)) { |
||
1332 | if (dev_priv->mm.aliasing_ppgtt) |
||
1333 | gen6_write_pdes(dev_priv->mm.aliasing_ppgtt); |
||
1334 | continue; |
||
1335 | } |
||
1336 | |||
1337 | gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); |
||
1338 | } |
||
1339 | |||
3243 | Serge | 1340 | i915_gem_chipset_flush(dev); |
2332 | Serge | 1341 | } |
1342 | |||
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
{
#ifdef writeq
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}
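
/*
 * Editor's note (illustrative): gen8 GTT entries are 64 bits wide, so
 * without a native writeq() the PTE is emitted as two 32-bit halves, low
 * dword first; a concurrent GPU lookup could briefly observe a torn entry,
 * which is one reason the insert paths below end with posting reads and a
 * TLB flush.
 */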

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_gtt_pte_t __iomem *gtt_entries =
		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0; /* shut up gcc */

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));
		i++;
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1])
			!= gen8_pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0) {
		unsigned long gtt = readl(&gtt_entries[i-1]);
		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
	}

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

1449 | |||
4560 | Serge | 1450 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
5060 | serge | 1451 | uint64_t start, |
1452 | uint64_t length, |
||
4560 | Serge | 1453 | bool use_scratch) |
1454 | { |
||
1455 | struct drm_i915_private *dev_priv = vm->dev->dev_private; |
||
5060 | serge | 1456 | unsigned first_entry = start >> PAGE_SHIFT; |
1457 | unsigned num_entries = length >> PAGE_SHIFT; |
||
4560 | Serge | 1458 | gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = |
1459 | (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
||
1460 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
||
1461 | int i; |
||
1462 | |||
1463 | if (WARN(num_entries > max_entries, |
||
1464 | "First entry = %d; Num entries = %d (max=%d)\n", |
||
1465 | first_entry, num_entries, max_entries)) |
||
1466 | num_entries = max_entries; |
||
1467 | |||
1468 | scratch_pte = gen8_pte_encode(vm->scratch.addr, |
||
1469 | I915_CACHE_LLC, |
||
1470 | use_scratch); |
||
1471 | for (i = 0; i < num_entries; i++) |
||
1472 | gen8_set_pte(>t_base[i], scratch_pte); |
||
1473 | readl(gtt_base); |
||
1474 | } |
||
1475 | |||
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

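/*
 * Worked example (for clarity, not driver code): clear_range() takes byte
 * offsets, so clearing a 64KiB object bound at GGTT offset 0x100000 gives
 *
 *	first_entry = 0x100000 >> PAGE_SHIFT = 256
 *	num_entries = 0x10000  >> PAGE_SHIFT = 16
 *
 * i.e. PTEs 256..271, each rewritten to point at the single scratch page.
 */
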

static void i915_ggtt_bind_vma(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 unused)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	BUG_ON(!i915_is_ggtt(vma->vm));
	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
	vma->obj->has_global_gtt_mapping = 1;
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	intel_gtt_clear_range(first_entry, num_entries);
}

static void i915_ggtt_unbind_vma(struct i915_vma *vma)
{
	const unsigned int first = vma->node.start >> PAGE_SHIFT;
	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;

	BUG_ON(!i915_is_ggtt(vma->vm));
	vma->obj->has_global_gtt_mapping = 0;
	intel_gtt_clear_range(first, size);
}

static void ggtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		flags |= PTE_READ_ONLY;

	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
	 * or we have a global mapping already but the cacheability flags have
	 * changed, set the global PTEs.
	 *
	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
	 * instead if none of the above hold true.
	 *
	 * NB: A global mapping should only be needed for special regions like
	 * "gtt mappable", SNB errata, or if specified via special execbuf
	 * flags. At all other times, the GPU will use the aliasing PPGTT.
	 */
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
		if (!obj->has_global_gtt_mapping ||
		    (cache_level != obj->cache_level)) {
			vma->vm->insert_entries(vma->vm, obj->pages,
						vma->node.start,
						cache_level, flags);
			obj->has_global_gtt_mapping = 1;
		}
	}

	if (dev_priv->mm.aliasing_ppgtt &&
	    (!obj->has_aliasing_ppgtt_mapping ||
	     (cache_level != obj->cache_level))) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->obj->pages,
					    vma->node.start,
					    cache_level, flags);
		vma->obj->has_aliasing_ppgtt_mapping = 1;
	}
}

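/*
 * Usage sketch (assumed, mirroring the comment above): callers that must have
 * a global mapping, e.g. for objects accessed through the mappable aperture,
 * pass GLOBAL_BIND so the first branch writes global PTEs even when an
 * aliasing PPGTT exists:
 *
 *	vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 *
 * All other binds fall through to the aliasing PPGTT path below it.
 */
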
static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	if (obj->has_global_gtt_mapping) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     obj->base.size,
				     true);
		obj->has_global_gtt_mapping = 0;
	}

	if (obj->has_aliasing_ppgtt_mapping) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 obj->base.size,
					 true);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

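/*
 * Worked example: the "color" is the cache level, and differently-colored
 * neighbours must not share a page. If the node preceding a hole has a
 * different color, the usable hole start moves up by 4096; if the node
 * following it differs, the usable hole end moves down by 4096, leaving one
 * 4KiB guard page on each mismatched side.
 */
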
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
		int ret;
		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}
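
/*
 * Note (sketch): the scratch page allocated above backs every PTE that does
 * not currently map an object; clear_range() encodes its DMA address, e.g.
 *
 *	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 *
 * so stray GPU prefetches past an object's end hit this page instead of
 * unmapped memory.
 */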

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}
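
/*
 * Worked decode (illustrative): GMS sits above SNB_GMCH_GMS_SHIFT in the
 * GMCH control word and counts 32MB units, so a raw field value of 0x5
 * yields 5 << 25 = 160MB of stolen memory.
 */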

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

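/*
 * Worked examples for the CHV decode above (illustrative):
 *	gmch_ctrl = 0x10 -> 0x10 << 25              = 512MB (32MB units)
 *	gmch_ctrl = 0x11 -> (0x11 - 0x11 + 2) << 22 =   8MB (4MB units)
 *	gmch_ctrl = 0x17 -> (0x17 - 0x17 + 9) << 22 =  36MB (4MB units)
 */
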
static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
	}

	return ret;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * Note that the hardware enforces snooping for all page
	 * table accesses. The snoop bit is actually ignored for
	 * PDEs.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

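/*
 * Note on the packing above (sketch): GEN8_PPAT(i, x) shifts the attribute
 * byte x into bits [8*i+7:8*i] of the 64-bit PAT value, so index 0 lands in
 * the low byte written to GEN8_PRIVATE_PAT and index 4 in the low byte of
 * GEN8_PRIVATE_PAT + 4. A page's PAT index in its PTE then selects one of
 * these eight entries.
 */
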
static int gen8_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (IS_CHERRYVIEW(dev)) {
		*stolen = chv_get_stolen_size(snb_gmch_ctl);
		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;

	return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}
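
/*
 * Worked example (illustrative): a 2MB GTT (gtt_size = 2 << 20) holds
 * 2MB / sizeof(gen6_gtt_pte_t) = 512Ki PTEs, so *gtt_total above becomes
 * 512Ki << PAGE_SHIFT = 2GB of GPU-mappable address space.
 */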

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	if (drm_mm_initialized(&vm->mm)) {
		drm_mm_takedown(&vm->mm);
		list_del(&vm->global_link);
	}
	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;

	if (unlikely(dev_priv->gtt.do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
//	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	} else {
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif
	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	return 0;
}
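
/*
 * Call sketch (assumed, matching how the driver wires this up at load):
 *
 *	ret = i915_gem_gtt_init(dev);
 *	if (ret)
 *		return ret;
 *	i915_gem_init_global_gtt(dev);
 *
 * i.e. probe sizes and vtable hooks first, then hand the aperture to the
 * drm_mm allocator via i915_gem_setup_global_gtt().
 */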

static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
					      struct i915_address_space *vm)
{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vma_link);
	INIT_LIST_HEAD(&vma->mm_list);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;

	switch (INTEL_INFO(vm->dev)->gen) {
	case 8:
	case 7:
	case 6:
		if (i915_is_ggtt(vm)) {
			vma->unbind_vma = ggtt_unbind_vma;
			vma->bind_vma = ggtt_bind_vma;
		} else {
			vma->unbind_vma = ppgtt_unbind_vma;
			vma->bind_vma = ppgtt_bind_vma;
		}
		break;
	case 5:
	case 4:
	case 3:
	case 2:
		BUG_ON(!i915_is_ggtt(vm));
		vma->unbind_vma = i915_ggtt_unbind_vma;
		vma->bind_vma = i915_ggtt_bind_vma;
		break;
	default:
		BUG();
	}

	/* Keep GGTT vmas first to make debug easier */
	if (i915_is_ggtt(vm))
		list_add(&vma->vma_link, &obj->vma_list);
	else
		list_add_tail(&vma->vma_link, &obj->vma_list);

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm);

	return vma;
}
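
/*
 * Usage sketch (assumed caller): vmas are never built by hand; going through
 * the lookup-or-create helper keeps at most one vma per (object, vm) pair:
 *
 *	struct i915_vma *vma =
 *		i915_gem_obj_lookup_or_create_vma(obj, &dev_priv->gtt.base);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 */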

struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}

void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		/* Only the first chunk may be skipped; free every later one. */
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}

void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL);
}

int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned int max_ents = SG_MAX_SINGLE_ALLOC;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			goto err;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;

err:
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL);

	return -ENOMEM;
}

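/*
 * Usage sketch for the scatterlist helpers above (assumed caller, error
 * handling elided; touch() is a placeholder for real per-page work):
 * allocate a table, fill each entry, then walk it a page at a time:
 *
 *	struct sg_table st;
 *	struct sg_page_iter iter;
 *
 *	if (sg_alloc_table(&st, npages, GFP_KERNEL))
 *		return -ENOMEM;
 *	// ... sg_set_page() on each entry ...
 *	for_each_sg_page(st.sgl, &iter, st.nents, 0)
 *		touch(sg_page_iter_page(&iter));
 *	sg_free_table(&st);
 */
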

void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}


void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);