Subversion Repositories Kolibri OS


--- Rev 5060
+++ Rev 5354
Line 21... Line 21...
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  *
  */
Line 25... Line -...
-
 
-#define AGP_NORMAL_MEMORY 0
-
-#define AGP_USER_TYPES (1 << 16)
-#define AGP_USER_MEMORY (AGP_USER_TYPES)
-#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
-
+#include
 #include
 #include
 #include "i915_drv.h"
 #include "i915_trace.h"
Line -... Line 31...
+#include "intel_drv.h"
+
-#include "intel_drv.h"
+#include
 
Line 39... Line 35...
 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
-bool intel_enable_ppgtt(struct drm_device *dev, bool full)
-{
-	if (i915.enable_ppgtt == 0)
-		return false;
-
-	if (i915.enable_ppgtt == 1 && full)
-		return false;
-
-	return true;
-}
-
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
-	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+	bool has_aliasing_ppgtt;
+	bool has_full_ppgtt;
+
+	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+	if (IS_GEN8(dev))
+		has_full_ppgtt = false; /* XXX why? */
+
+	/*
+	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
+	 * execlists, the sole mechanism available to submit work.
+	 */
+	if (INTEL_INFO(dev)->gen < 9 &&
+	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
Line 56... Line 54...
 		return 0;
 
 	if (enable_ppgtt == 1)
Line 74... Line 72...
 	    dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
Line 78... Line 76...
 
-	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+	return has_aliasing_ppgtt ? 1 : 0;
Line 80... Line 78...
 }
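The rewritten helper folds the old intel_enable_ppgtt() check into one routine that clamps the i915.enable_ppgtt module option to what the hardware generation actually supports. A minimal sketch of the intended call site, assuming the driver stores the sanitized value back during initialization (the actual caller is not part of this diff):

	/* sketch (assumed call site): clamp the module option once at init */
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);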
 
 
 static void ppgtt_bind_vma(struct i915_vma *vma,
 			   enum i915_cache_level cache_level,
 			   u32 flags);
 static void ppgtt_unbind_vma(struct i915_vma *vma);
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 
Line 172... Line 169...
 				     bool valid, u32 flags)
 {
 	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
-	/* Mark the page as writeable.  Other platforms don't have a
-	 * setting for read-only/writable, so this matches that behavior.
-	 */
 	if (!(flags & PTE_READ_ONLY))
 	pte |= BYT_PTE_WRITEABLE;
 
Line 220... Line 214...
 	return pte;
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
-			   uint64_t val, bool synchronous)
+			   uint64_t val)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	int ret;
 
 	BUG_ON(entry >= 4);
 
-	if (synchronous) {
-		I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
-		I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
-		return 0;
-	}
-
 	ret = intel_ring_begin(ring, 6);
Line 249... Line 236...
 
 	return 0;
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring,
-			  bool synchronous)
+			  struct intel_engine_cs *ring)
 {
 	int i, ret;
 
 	/* bit of a hack to find the actual last used pd */
 	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
 	for (i = used_pd - 1; i >= 0; i--) {
 		dma_addr_t addr = ppgtt->pd_dma_addr[i];
-		ret = gen8_write_pdp(ring, i, addr, synchronous);
+		ret = gen8_write_pdp(ring, i, addr);
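With the synchronous MMIO fallback removed, every PDP update is emitted through the ring. The emission itself is elided by this diff; based on the mainline kernel this driver tracks, it presumably consists of two MI_LOAD_REGISTER_IMM pairs per descriptor (treat the exact sequence as an assumption, not part of this revision):

	/* sketch: the six dwords reserved by intel_ring_begin(ring, 6) */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, (u32)(val >> 32));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, (u32)(val));
	intel_ring_advance(ring);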
Line 281... Line 267...
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	unsigned last_pte, i;
 
-    pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096);
-    if(pt_vaddr == NULL)
-        return;
-
     scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
                       I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
 		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
 
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES_PER_PAGE)
 			last_pte = GEN8_PTES_PER_PAGE;
 
-        MapPage(pt_vaddr,(addr_t)page_table, PG_SW);
+		pt_vaddr = kmap_atomic(page_table);
 
 		for (i = pte; i < last_pte; i++) {
 			pt_vaddr[i] = scratch_pte;
 			num_entries--;
 		}
 
 		if (!HAS_LLC(ppgtt->base.dev))
 			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+		kunmap_atomic(pt_vaddr);
 
 		pte = 0;
 		if (++pde == GEN8_PDES_PER_PAGE) {
 			pdpe++;
 			pde = 0;
Line 327... Line 309...
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
 	struct sg_page_iter sg_iter;
 
-    pt_vaddr = AllocKernelSpace(4096);
-    if(pt_vaddr == NULL)
-        return;
-
-    MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
+	pt_vaddr = NULL;
 
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
 			break;
 
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
 					cache_level, true);
 		if (++pte == GEN8_PTES_PER_PAGE) {
 			if (!HAS_LLC(ppgtt->base.dev))
 				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+			kunmap_atomic(pt_vaddr);
+			pt_vaddr = NULL;
 			if (++pde == GEN8_PDES_PER_PAGE) {
 				pdpe++;
 				pde = 0;
 			}
 			pte = 0;
-            MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
 		}
 	}
-    FreeKernelSpace(pt_vaddr);
+	if (pt_vaddr) {
+		if (!HAS_LLC(ppgtt->base.dev))
+			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+		kunmap_atomic(pt_vaddr);
+	}
 }
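Both gen8 walkers above drop the KolibriOS idiom of reserving a fixed kernel window with AllocKernelSpace(4096) and retargeting it via MapPage(), in favour of the Linux kmap_atomic()/kunmap_atomic() pairing, which maps exactly one page-table page for the duration of the update and also removes the silent early return the old code took when AllocKernelSpace() failed. A condensed sketch of the new pattern (fill_pt is a hypothetical helper, not a function in this file):

	/* sketch: fill one page-table page with a scratch PTE */
	static void fill_pt(struct page *page_table, gen8_gtt_pte_t scratch_pte)
	{
		gen8_gtt_pte_t *pt_vaddr = kmap_atomic(page_table);
		int i;

		for (i = 0; i < GEN8_PTES_PER_PAGE; i++)
			pt_vaddr[i] = scratch_pte;

		/* pairs with kmap_atomic(); no sleeping allowed in between */
		kunmap_atomic(pt_vaddr);
	}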
Line 407... Line 393...
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 
-	list_del(&vm->global_link);
-	drm_mm_takedown(&vm->mm);
-
 	gen8_ppgtt_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
Line 574... Line 557...
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 {
 	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
 	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
 	int i, j, ret;
-	gen8_ppgtt_pde_t *pd_vaddr;
 
 	if (size % (1<<30))
 		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
Line 607... Line 589...
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
 	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
 	 * will never need to touch the PDEs again.
 	 */
-
-    pd_vaddr = AllocKernelSpace(4096);
-
     for (i = 0; i < max_pdp; i++) {
-        MapPage(pd_vaddr,(addr_t)(&ppgtt->pd_pages[i]), 3);
+		gen8_ppgtt_pde_t *pd_vaddr;
+		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
 		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
 			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
 						      I915_CACHE_LLC);
 		}
 		if (!HAS_LLC(ppgtt->base.dev))
 			drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
+		kunmap_atomic(pd_vaddr);
 	}
-    FreeKernelSpace(pd_vaddr);
 
-	ppgtt->enable = gen8_ppgtt_enable;
 	ppgtt->switch_mm = gen8_mm_switch;
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
Line 675... Line 654...
 
 	return (ppgtt->pd_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring,
-			 bool synchronous)
+			 struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	/* If we're in reset, we can assume the GPU is sufficiently idle to
-	 * manually frob these bits. Ideally we could use the ring functions,
-	 * except our error handling makes it quite difficult (can't use
-	 * intel_ring_begin, ring->flush, or intel_ring_advance)
-	 *
-	 * FIXME: We should try not to special case reset
-	 */
-	if (synchronous ||
-	    i915_reset_in_progress(&dev_priv->gpu_error)) {
-		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-		POSTING_READ(RING_PP_DIR_BASE(ring));
-		return 0;
-	}
-
 	/* NB: TLBs must be flushed and invalidated before a switch */
Line 719... Line 679...
 
 	return 0;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring,
-			  bool synchronous)
+			  struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	/* If we're in reset, we can assume the GPU is sufficiently idle to
-	 * manually frob these bits. Ideally we could use the ring functions,
-	 * except our error handling makes it quite difficult (can't use
-	 * intel_ring_begin, ring->flush, or intel_ring_advance)
-	 *
-	 * FIXME: We should try not to special case reset
-	 */
-	if (synchronous ||
-	    i915_reset_in_progress(&dev_priv->gpu_error)) {
-		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-		POSTING_READ(RING_PP_DIR_BASE(ring));
-		return 0;
-	}
-
 	/* NB: TLBs must be flushed and invalidated before a switch */
Line 770... Line 711...
 
 	return 0;
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring,
-			  bool synchronous)
+			  struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!synchronous)
-		return 0;
+
 	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 
 	POSTING_READ(RING_PP_DIR_DCLV(ring));
 
 	return 0;
 }
 
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int j, ret;
-
-	for_each_ring(ring, dev_priv, j) {
-		I915_WRITE(RING_MODE_GEN7(ring),
-			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-		/* We promise to do a switch later with FULL PPGTT. If this is
-		 * aliasing, this is the one and only switch we'll do */
-		if (USES_FULL_PPGTT(dev))
-			continue;
-
-		ret = ppgtt->switch_mm(ppgtt, ring, true);
-		if (ret)
-			goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	for_each_ring(ring, dev_priv, j)
-		I915_WRITE(RING_MODE_GEN7(ring),
-			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
-	return ret;
+static void gen8_ppgtt_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int j;
+
+	for_each_ring(ring, dev_priv, j) {
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Line 838... Line 757...
 			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
 		}
 		I915_WRITE(GAM_ECOCHK, ecochk);
 
 	for_each_ring(ring, dev_priv, i) {
-		int ret;
 		/* GFX_MODE is per-ring on gen7+ */
 			I915_WRITE(RING_MODE_GEN7(ring),
 				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-		/* We promise to do a switch later with FULL PPGTT. If this is
-		 * aliasing, this is the one and only switch we'll do */
-		if (USES_FULL_PPGTT(dev))
-			continue;
-
-		ret = ppgtt->switch_mm(ppgtt, ring, true);
-		if (ret)
-			return ret;
 	}
-
-	return 0;
Line 859... Line 766...
 }
 
-static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_enable(struct drm_device *dev)
 {
-	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
 	uint32_t ecochk, gab_ctl, ecobits;
-	int i;
 
Line 875... Line 779...
 
 	ecochk = I915_READ(GAM_ECOCHK);
 	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
 	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-	for_each_ring(ring, dev_priv, i) {
-		int ret = ppgtt->switch_mm(ppgtt, ring, true);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
 }
 
Line 902... Line 798...
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
-    pt_vaddr = AllocKernelSpace(4096);
-
-    if(pt_vaddr == NULL)
-        return;
-
 	while (num_entries) {
             last_pte = first_pte + num_entries;
             if (last_pte > I915_PPGTT_PT_ENTRIES)
                 last_pte = I915_PPGTT_PT_ENTRIES;
 
-            MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
+		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
             for (i = first_pte; i < last_pte; i++)
                 pt_vaddr[i] = scratch_pte;
 
+		kunmap_atomic(pt_vaddr);
+
             num_entries -= last_pte - first_pte;
             first_pte = 0;
             act_pt++;
Line 938... Line 829...
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	struct sg_page_iter sg_iter;
 
-    pt_vaddr = AllocKernelSpace(4096);
-
-    if(pt_vaddr == NULL)
-        return;
-
-    MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
+	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
 				       cache_level, true, flags);
 
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+			kunmap_atomic(pt_vaddr);
+			pt_vaddr = NULL;
 			act_pt++;
-    		MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
 			act_pte = 0;
 			}
 		}
-    FreeKernelSpace(pt_vaddr);
+	if (pt_vaddr)
+		kunmap_atomic(pt_vaddr);
 }
986
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
876
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
987
{
877
{
988
	struct i915_hw_ppgtt *ppgtt =
878
	struct i915_hw_ppgtt *ppgtt =
989
		container_of(vm, struct i915_hw_ppgtt, base);
879
		container_of(vm, struct i915_hw_ppgtt, base);
Line 990... Line -...
990
 
-
 
991
	list_del(&vm->global_link);
-
 
992
	drm_mm_takedown(&ppgtt->base.mm);
880
 
Line 993... Line 881...
993
	drm_mm_remove_node(&ppgtt->node);
881
	drm_mm_remove_node(&ppgtt->node);
994
 
882
 
995
	gen6_ppgtt_unmap_pages(ppgtt);
883
	gen6_ppgtt_unmap_pages(ppgtt);
Line 1108... Line 996...
1108
	struct drm_i915_private *dev_priv = dev->dev_private;
996
	struct drm_i915_private *dev_priv = dev->dev_private;
1109
	int ret;
997
	int ret;
Line 1110... Line 998...
1110
 
998
 
1111
	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
999
	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
1112
	if (IS_GEN6(dev)) {
-
 
1113
	ppgtt->enable = gen6_ppgtt_enable;
1000
	if (IS_GEN6(dev)) {
1114
		ppgtt->switch_mm = gen6_mm_switch;
1001
		ppgtt->switch_mm = gen6_mm_switch;
1115
	} else if (IS_HASWELL(dev)) {
-
 
1116
		ppgtt->enable = gen7_ppgtt_enable;
1002
	} else if (IS_HASWELL(dev)) {
1117
		ppgtt->switch_mm = hsw_mm_switch;
1003
		ppgtt->switch_mm = hsw_mm_switch;
1118
	} else if (IS_GEN7(dev)) {
-
 
1119
		ppgtt->enable = gen7_ppgtt_enable;
1004
	} else if (IS_GEN7(dev)) {
1120
		ppgtt->switch_mm = gen7_mm_switch;
1005
		ppgtt->switch_mm = gen7_mm_switch;
1121
	} else
1006
	} else
Line 1122... Line 1007...
1122
		BUG();
1007
		BUG();
Line 1145... Line 1030...
1145
 
1030
 
1146
	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
1031
	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
1147
			 ppgtt->node.size >> 20,
1032
			 ppgtt->node.size >> 20,
Line -... Line 1033...
-
 
1033
			 ppgtt->node.start / PAGE_SIZE);
-
 
1034
 
-
 
1035
	gen6_write_pdes(ppgtt);
-
 
1036
	DRM_DEBUG("Adding PPGTT at offset %x\n",
1148
			 ppgtt->node.start / PAGE_SIZE);
1037
		  ppgtt->pd_offset << 10);
1149
 
1038
 
Line 1150... Line 1039...
1150
	return 0;
1039
	return 0;
1151
}
1040
}
1152
 
1041
 
1153
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
-
 
Line 1154... Line 1042...
1154
{
1042
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1155
	struct drm_i915_private *dev_priv = dev->dev_private;
1043
{
Line 1156... Line 1044...
1156
	int ret = 0;
1044
	struct drm_i915_private *dev_priv = dev->dev_private;
1157
 
1045
 
1158
	ppgtt->base.dev = dev;
1046
	ppgtt->base.dev = dev;
1159
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1047
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1160
 
1048
 
1161
	if (INTEL_INFO(dev)->gen < 8)
1049
	if (INTEL_INFO(dev)->gen < 8)
1162
		ret = gen6_ppgtt_init(ppgtt);
1050
		return gen6_ppgtt_init(ppgtt);
-
 
1051
	else if (IS_GEN8(dev) || IS_GEN9(dev))
1163
	else if (IS_GEN8(dev))
1052
		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1164
		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1053
	else
-
 
1054
		BUG();
-
 
1055
}
-
 
1056
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
-
 
1057
{
1165
	else
1058
	struct drm_i915_private *dev_priv = dev->dev_private;
1166
		BUG();
1059
	int ret = 0;
1167
 
1060
 
1168
	if (!ret) {
1061
	ret = __hw_ppgtt_init(dev, ppgtt);
1169
		struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1170
		kref_init(&ppgtt->ref);
-
 
1171
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
-
 
1172
			    ppgtt->base.total);
-
 
1173
		i915_init_vm(dev_priv, &ppgtt->base);
1062
	if (ret == 0) {
-
 
1063
		kref_init(&ppgtt->ref);
-
 
1064
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
1174
		if (INTEL_INFO(dev)->gen < 8) {
1065
			    ppgtt->base.total);
Line -... Line 1066...
-
 
1066
		i915_init_vm(dev_priv, &ppgtt->base);
-
 
1067
	}
-
 
1068
 
-
 
1069
	return ret;
-
 
1070
}
-
 
1071
 
-
 
1072
int i915_ppgtt_init_hw(struct drm_device *dev)
-
 
1073
{
-
 
1074
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1075
	struct intel_engine_cs *ring;
-
 
1076
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
 
1077
	int i, ret = 0;
-
 
1078
 
-
 
1079
	/* In the case of execlists, PPGTT is enabled by the context descriptor
-
 
1080
	 * and the PDPs are contained within the context itself.  We don't
-
 
1081
	 * need to do anything here. */
-
 
1082
	if (i915.enable_execlists)
-
 
1083
		return 0;
-
 
1084
 
-
 
1085
	if (!USES_PPGTT(dev))
-
 
1086
		return 0;
-
 
1087
 
-
 
1088
	if (IS_GEN6(dev))
-
 
1089
		gen6_ppgtt_enable(dev);
-
 
1090
	else if (IS_GEN7(dev))
-
 
1091
		gen7_ppgtt_enable(dev);
-
 
1092
	else if (INTEL_INFO(dev)->gen >= 8)
-
 
1093
		gen8_ppgtt_enable(dev);
-
 
1094
	else
1175
			gen6_write_pdes(ppgtt);
1095
		WARN_ON(1);
1176
			DRM_DEBUG("Adding PPGTT at offset %x\n",
1096
 
-
 
1097
	if (ppgtt) {
-
 
1098
		for_each_ring(ring, dev_priv, i) {
-
 
1099
			ret = ppgtt->switch_mm(ppgtt, ring);
-
 
1100
			if (ret != 0)
-
 
1101
				return ret;
-
 
1102
		}
-
 
1103
	}
-
 
1104
 
-
 
1105
	return ret;
-
 
1106
}
-
 
1107
struct i915_hw_ppgtt *
-
 
1108
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
-
 
1109
{
-
 
1110
	struct i915_hw_ppgtt *ppgtt;
-
 
1111
	int ret;
-
 
1112
 
-
 
1113
	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-
 
1114
	if (!ppgtt)
-
 
1115
		return ERR_PTR(-ENOMEM);
-
 
1116
 
-
 
1117
	ret = i915_ppgtt_init(dev, ppgtt);
-
 
1118
	if (ret) {
-
 
1119
		kfree(ppgtt);
-
 
1120
		return ERR_PTR(ret);
-
 
1121
	}
-
 
1122
 
-
 
1123
	ppgtt->file_priv = fpriv;
-
 
1124
 
-
 
1125
	trace_i915_ppgtt_create(&ppgtt->base);
-
 
1126
 
-
 
1127
	return ppgtt;
-
 
1128
}
-
 
1129
 
-
 
1130
void  i915_ppgtt_release(struct kref *kref)
-
 
1131
{
-
 
1132
	struct i915_hw_ppgtt *ppgtt =
-
 
1133
		container_of(kref, struct i915_hw_ppgtt, ref);
-
 
1134
 
-
 
1135
	trace_i915_ppgtt_release(&ppgtt->base);
-
 
1136
 
-
 
1137
	/* vmas should already be unbound */
-
 
1138
	WARN_ON(!list_empty(&ppgtt->base.active_list));
-
 
1139
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
-
 
1140
 
Line 1177... Line 1141...
1177
				  ppgtt->pd_offset << 10);
1141
	list_del(&ppgtt->base.global_link);
1178
		}
1142
	drm_mm_takedown(&ppgtt->base.mm);
1179
	}
1143
 
1180
 
1144
	ppgtt->base.cleanup(&ppgtt->base);
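A full PPGTT is now reference-counted: i915_ppgtt_create() hands out a kref-initialised address space, and i915_ppgtt_release() runs when the last reference is dropped. The get/put wrappers themselves are not part of this hunk (i915_ppgtt_get is used further down in this diff); a sketch under the assumption of the usual kref pairing:

	/* sketch: assumed kref wrappers around i915_ppgtt_release() */
	static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
	{
		kref_get(&ppgtt->ref);
	}

	static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
	{
		if (ppgtt)
			kref_put(&ppgtt->ref, i915_ppgtt_release);
	}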
Line 1252... Line 1216...
 	for_each_ring(ring, dev_priv, i) {
 		u32 fault_reg;
 		fault_reg = I915_READ(RING_FAULT_REG(ring));
 		if (fault_reg & RING_FAULT_VALID) {
 			DRM_DEBUG_DRIVER("Unexpected fault\n"
-					 "\tAddr: 0x%08lx\\n"
+					 "\tAddr: 0x%08lx\n"
 					 "\tAddress space: %s\n"
 					 "\tSource ID: %d\n"
 					 "\tType: %d\n",
 					 fault_reg & PAGE_MASK,
 					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
Line 1267... Line 1231...
 		}
 	}
 	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
 }
Line -... Line 1235...
+
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+		intel_gtt_chipset_flush();
+	} else {
+		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+		POSTING_READ(GFX_FLSH_CNTL_GEN6);
+	}
+}
 
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
Line 1283... Line 1257...
 
 	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 				       dev_priv->gtt.base.start,
 				       dev_priv->gtt.base.total,
 				       true);
+
+	i915_ggtt_flush(dev_priv);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1310
		i915_gem_clflush_object(obj, obj->pin_display);
1286
		i915_gem_clflush_object(obj, obj->pin_display);
1311
		/* The bind_vma code tries to be smart about tracking mappings.
1287
		/* The bind_vma code tries to be smart about tracking mappings.
1312
		 * Unfortunately above, we've just wiped out the mappings
1288
		 * Unfortunately above, we've just wiped out the mappings
1313
		 * without telling our object about it. So we need to fake it.
1289
		 * without telling our object about it. So we need to fake it.
1314
		 */
1290
		 */
1315
		obj->has_global_gtt_mapping = 0;
1291
		vma->bound &= ~GLOBAL_BIND;
1316
		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1292
		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1317
	}
1293
	}
Line 1318... Line 1294...
1318
 
1294
 
Line 1335... Line 1311...
1335
		}
1311
		}
Line 1336... Line 1312...
1336
 
1312
 
1337
		gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
1313
		gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
Line 1338... Line 1314...
1338
	}
1314
	}
1339
 
1315
 
Line 1340... Line 1316...
1340
	i915_gem_chipset_flush(dev);
1316
	i915_ggtt_flush(dev_priv);
1341
}
1317
}
1342
 
1318
 
Line 1507... Line 1483...
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
 	BUG_ON(!i915_is_ggtt(vma->vm));
 	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
-	vma->obj->has_global_gtt_mapping = 1;
+	vma->bound = GLOBAL_BIND;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
Line 1526... Line 1502...
 {
 	const unsigned int first = vma->node.start >> PAGE_SHIFT;
 	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
 	BUG_ON(!i915_is_ggtt(vma->vm));
-	vma->obj->has_global_gtt_mapping = 0;
+	vma->bound = 0;
 	intel_gtt_clear_range(first, size);
 }
 
Line 1554... Line 1530...
 	 * NB: A global mapping should only be needed for special regions like
 	 * "gtt mappable", SNB errata, or if specified via special execbuf
 	 * flags. At all other times, the GPU will use the aliasing PPGTT.
 	 */
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
-		if (!obj->has_global_gtt_mapping ||
+		if (!(vma->bound & GLOBAL_BIND) ||
 		    (cache_level != obj->cache_level)) {
 			vma->vm->insert_entries(vma->vm, obj->pages,
 						vma->node.start,
 						cache_level, flags);
-			obj->has_global_gtt_mapping = 1;
+			vma->bound |= GLOBAL_BIND;
 		}
 	}
 
 	if (dev_priv->mm.aliasing_ppgtt &&
-	    (!obj->has_aliasing_ppgtt_mapping ||
+	    (!(vma->bound & LOCAL_BIND) ||
 	     (cache_level != obj->cache_level))) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->obj->pages,
 					    vma->node.start,
 					    cache_level, flags);
-		vma->obj->has_aliasing_ppgtt_mapping = 1;
+		vma->bound |= LOCAL_BIND;
 	}
 }
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_device *dev = vma->vm->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (obj->has_global_gtt_mapping) {
+	if (vma->bound & GLOBAL_BIND) {
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start,
 				     obj->base.size,
 				       true);
-		obj->has_global_gtt_mapping = 0;
+		vma->bound &= ~GLOBAL_BIND;
 	}
 
-	if (obj->has_aliasing_ppgtt_mapping) {
+	if (vma->bound & LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.clear_range(&appgtt->base,
 					 vma->node.start,
 					 obj->base.size,
 					 true);
-		obj->has_aliasing_ppgtt_mapping = 0;
+		vma->bound &= ~LOCAL_BIND;
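Mapping state moves off the object (has_global_gtt_mapping / has_aliasing_ppgtt_mapping) onto each VMA as a small bitmask, which tracks an object independently per address space. The flag values below are assumptions for illustration; the real definitions live in the corresponding header:

	/* sketch: per-vma binding flags as used by the code above */
	#define GLOBAL_BIND (1 << 0)	/* bound into the global GTT */
	#define LOCAL_BIND  (1 << 1)	/* bound into a per-process PPGTT */

So an unbind only clears its own bit, e.g. vma->bound &= ~GLOBAL_BIND leaves a PPGTT binding intact.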
Line 1632... Line 1608...
 		if (node->allocated && node->color != color)
 			*end -= 4096;
 	}
 }
 
-void i915_gem_setup_global_gtt(struct drm_device *dev,
+static int i915_gem_setup_global_gtt(struct drm_device *dev,
 			      unsigned long start,
 			      unsigned long mappable_end,
 			      unsigned long end)
 {
Line 1651... Line 1627...
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
 	struct drm_mm_node *entry;
 	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
+	int ret;
 
 	BUG_ON(mappable_end > end);
 
Line 1662... Line 1639...
 		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
-		int ret;
+
 		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
 			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
 		WARN_ON(i915_gem_obj_ggtt_bound(obj));
 		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
-		if (ret)
-			DRM_DEBUG_KMS("Reservation failed\n");
-		obj->has_global_gtt_mapping = 1;
+		if (ret) {
+			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
+			return ret;
+		}
+		vma->bound |= GLOBAL_BIND;
 	}
Line 1686... Line 1665...
 				     hole_end - hole_start, true);
 	}
 
 	/* And finally clear the reserved guard page */
 	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+
+	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt;
+
+		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+		if (!ppgtt)
+			return -ENOMEM;
+
+		ret = __hw_ppgtt_init(dev, ppgtt);
+		if (ret != 0)
+			return ret;
+
+		dev_priv->mm.aliasing_ppgtt = ppgtt;
+	}
+
+	return 0;
Line 1691... Line 1686...
 }
 
 void i915_gem_init_global_gtt(struct drm_device *dev)
Line 1699... Line 1694...
 	mappable_size = dev_priv->gtt.mappable_end;
 
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
+
+void i915_global_gtt_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
+
+	if (dev_priv->mm.aliasing_ppgtt) {
+		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+		ppgtt->base.cleanup(&ppgtt->base);
+	}
+
+	if (drm_mm_initialized(&vm->mm)) {
+		drm_mm_takedown(&vm->mm);
+		list_del(&vm->global_link);
+	}
+
+	vm->cleanup(vm);
+}
 
 static int setup_scratch_page(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct page *page;
 	dma_addr_t dma_addr;
 
 	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 	if (page == NULL)
 		return -ENOMEM;
-    get_page(page);
 	set_pages_uc(page, 1);
Line 1733... Line 1746...
 	struct page *page = dev_priv->gtt.base.scratch.page;
 
 	set_pages_wb(page, 1);
 	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	put_page(page);
 	__free_page(page);
Line 1740... Line 1752...
 }
 
Line 1803... Line 1815...
 		return (gmch_ctrl - 0x11 + 2) << 22;
 	else
 		return (gmch_ctrl - 0x17 + 9) << 22;
 }
+
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+	if (gen9_gmch_ctl < 0xf0)
+		return gen9_gmch_ctl << 25; /* 32 MB units */
+	else
+		/* 4MB increments starting at 0xf0 for 4MB */
+		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
 
 static int ggtt_probe_common(struct drm_device *dev,
 			     size_t gtt_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
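The new gen9 stolen-memory decode mirrors the BDW variant but switches unit size at 0xf0. Worked examples of the arithmetic (register values chosen for illustration only):

	/* gen9_gmch_ctl = 0x08: below 0xf0, 32 MB units -> 0x08 << 25 = 256 MB */
	/* gen9_gmch_ctl = 0xf0: 4 MB units from 0xf0    -> (0 + 1) << 22 =  4 MB */
	/* gen9_gmch_ctl = 0xf2:                          -> (2 + 1) << 22 = 12 MB */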
Line 1846... Line 1870...
 	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
 	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
 	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
 	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+	if (!USES_PPGTT(dev_priv->dev))
+		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+		 * so RTL will always use the value corresponding to
+		 * pat_sel = 000".
+		 * So let's disable cache for GGTT to avoid screen corruptions.
+		 * MOCS still can be used though.
+		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+		 * before this patch, i.e. the same uncached + snooping access
+		 * like on gen6/7 seems to be in effect.
+		 * - So this just fixes blitter/render access. Again it looks
+		 * like it's not just uncached access, but uncached + snooping.
+		 * So we can still hold onto all our assumptions wrt cpu
+		 * clflushing on LLC machines.
+		 */
+		pat = GEN8_PPAT(0, GEN8_PPAT_UC);
 
 	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
 	 * write would work. */
 	I915_WRITE(GEN8_PRIVATE_PAT, pat);
 	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
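Each GEN8_PPAT(i, x) places an 8-bit attribute byte at slot i of a 64-bit value, which is then written to GEN8_PRIVATE_PAT as two 32-bit halves, as above. A sketch of the packing, assuming the usual macro shape (the real definition lives in the driver headers):

	/* sketch (assumed macro): attribute byte i lives at bits [8*i+7 : 8*i] */
	#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))

	/* so entry 0 occupies bits 7:0 and entry 7 bits 63:56 of "pat" */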
Line 1862... Line 1902...
 	 * Map WB on BDW to snooped on CHV.
 	 *
 	 * Only the snoop bit has meaning for CHV, the rest is
 	 * ignored.
 	 *
-	 * Note that the harware enforces snooping for all page
-	 * table accesses. The snoop bit is actually ignored for
-	 * PDEs.
+	 * The hardware will never snoop for certain types of accesses:
+	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
+	 * - PPGTT page tables
+	 * - some other special cycles
+	 *
+	 * As with BDW, we also need to consider the following for GT accesses:
+	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
+	 * so RTL will always use the value corresponding to
+	 * pat_sel = 000".
+	 * Which means we must set the snoop bit in PAT entry 0
+	 * in order to keep the global status page working.
 	 */
 	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
 	      GEN8_PPAT(1, 0) |
 	      GEN8_PPAT(2, 0) |
 	      GEN8_PPAT(3, 0) |
Line 1899... Line 1947...
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
 
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (INTEL_INFO(dev)->gen >= 9) {
+		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
+		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	} else if (IS_CHERRYVIEW(dev)) {
 		*stolen = chv_get_stolen_size(snb_gmch_ctl);
 		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
 	} else {
Line 1967... Line 2018...
 static void gen6_gmch_remove(struct i915_address_space *vm)
 {
 
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
-	if (drm_mm_initialized(&vm->mm)) {
-		drm_mm_takedown(&vm->mm);
-		list_del(&vm->global_link);
-	}
 	iounmap(gtt->gsm);
 	teardown_scratch_page(vm->dev);
Line 2075... Line 2122...
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
 
 	switch (INTEL_INFO(vm->dev)->gen) {
+	case 9:
 	case 8:
 	case 7:
 	case 6:
 		if (i915_is_ggtt(vm)) {
Line 2101... Line 2149...
 	}
 
 	/* Keep GGTT vmas first to make debug easier */
 	if (i915_is_ggtt(vm))
 		list_add(&vma->vma_link, &obj->vma_list);
-	else
-		list_add_tail(&vma->vma_link, &obj->vma_list);
+	else {
+		list_add_tail(&vma->vma_link, &obj->vma_list);
+		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+	}
 
 	return vma;