Subversion Repositories Kolibri OS


Diff of Rev 4126 (old) against Rev 4280 (new)
Line 63... Line 63...

 #define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {

Line 84... Line 85...

 
 	return pte;
 }
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-				   enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 

Line 110... Line 112...

 
 #define BYT_PTE_WRITEABLE		(1 << 1)
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 

Line 127... Line 130...

 
 	return pte;
 }
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
 
 	if (level != I915_CACHE_NONE)
 		pte |= HSW_WB_LLC_AGE3;
 
 	return pte;
 }
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-				      enum i915_cache_level level)
+				      enum i915_cache_level level,
+				      bool valid)
 {
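
The pattern in every encoder above is the same: the new valid argument decides whether GEN6_PTE_VALID is set, so callers can now produce entries the hardware treats as not present instead of always mapping something. A minimal caller-side sketch of the two cases, reusing the scratch address the way the clear_range paths below do (illustration only, not part of the diff; the variable names are made up):

	/* Sketch: a scratch PTE that still maps the scratch page (valid bit set)
	 * versus an entry with the valid bit clear, as used on suspend. */
	gen6_gtt_pte_t scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
	gen6_gtt_pte_t blank_pte   = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, false);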
Line 241... Line 246...

 }
 
 /* PPGTT support for Sandybdrige/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 				   unsigned first_entry,
-				   unsigned num_entries)
+				   unsigned num_entries,
+				   bool use_scratch)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
 	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 

Line 299... Line 305...

     MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		dma_addr_t page_addr;
 
 		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			act_pt++;
     		MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);

Line 378... Line 384...

 
         ppgtt->pt_dma_addr[i] = pt_addr;
     }
 
 	ppgtt->base.clear_range(&ppgtt->base, 0,
-			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);

Line 455... Line 461...

 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			      struct drm_i915_gem_object *obj)
 {
 	ppgtt->base.clear_range(&ppgtt->base,
 				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-			       obj->base.size >> PAGE_SHIFT);
+				obj->base.size >> PAGE_SHIFT,
+				true);
 }
Line 462... Line 469...

 
 extern int intel_iommu_gfx_mapped;
 /* Certain Gen5 chipsets require require idling the GPU before

Line 496... Line 503...

 {
 	if (unlikely(dev_priv->gtt.do_idle_maps))
 		dev_priv->mm.interruptible = interruptible;
 }
Line -... Line 507...

+
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	for_each_ring(ring, dev_priv, i) {
+		u32 fault_reg;
+		fault_reg = I915_READ(RING_FAULT_REG(ring));
+		if (fault_reg & RING_FAULT_VALID) {
+			DRM_DEBUG_DRIVER("Unexpected fault\n"
+					 "\tAddr: 0x%08lx\\n"
+					 "\tAddress space: %s\n"
+					 "\tSource ID: %d\n"
+					 "\tType: %d\n",
+					 fault_reg & PAGE_MASK,
+					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+					 RING_FAULT_SRCID(fault_reg),
+					 RING_FAULT_FAULT_TYPE(fault_reg));
+			I915_WRITE(RING_FAULT_REG(ring),
+				   fault_reg & ~RING_FAULT_VALID);
+		}
+	}
+	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Don't bother messing with faults pre GEN6 as we have little
+	 * documentation supporting that it's a good idea.
+	 */
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	i915_check_and_clear_faults(dev);
+
+	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+				       dev_priv->gtt.base.start / PAGE_SIZE,
+				       dev_priv->gtt.base.total / PAGE_SIZE,
+				       false);
+}
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
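
Rev 4280 adds the two suspend-path helpers above: i915_check_and_clear_faults() reports and clears any pending per-ring fault registers, and i915_gem_suspend_gtt_mappings() then scrubs the whole GGTT with invalid PTEs (use_scratch == false). On resume, i915_gem_restore_gtt_mappings() below re-checks the faults, refills the range with scratch PTEs and rebinds every object. A rough sketch of the intended ordering around a suspend/resume cycle (surrounding code assumed; only the two calls come from this diff):

	/* Assumed suspend/resume flow, illustration only. */
	i915_gem_suspend_gtt_mappings(dev);	/* GGTT PTEs written with the valid bit clear */
	/* ... device powered down, then powered back up ... */
	i915_gem_restore_gtt_mappings(dev);	/* scratch-fill the GGTT, then rebind objects */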
Line 504... Line 558...

 	struct drm_i915_gem_object *obj;
 
+	i915_check_and_clear_faults(dev);
+
 	/* First fill our portion of the GTT with scratch pages */
 	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 				       dev_priv->gtt.base.start / PAGE_SIZE,
-				       dev_priv->gtt.base.total / PAGE_SIZE);
+				       dev_priv->gtt.base.total / PAGE_SIZE,
+				       true);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {

Line 547... Line 604...

 	struct sg_page_iter sg_iter;
 	dma_addr_t addr;
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
 			i++;
 		}
 

Line 559... Line 616...

 	 * of NUMA access patterns. Therefore, even with the way we assume
 	 * hardware should work, we must keep this posting read for paranoia.
 	 */
 	if (i != 0)
 		WARN_ON(readl(&gtt_entries[i-1]) !=
-			vm->pte_encode(addr, level));
+			vm->pte_encode(addr, level, true));
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
 	 * have finished.

Line 571... Line 628...

 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
-				  unsigned int num_entries)
+				  unsigned int num_entries,
+				  bool use_scratch)
 {
 	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
 		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;

Line 584... Line 642...

 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
 		 first_entry, num_entries, max_entries))
         num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
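
Threading use_scratch through the clear paths lets one helper cover both situations: the ordinary unbind and initialisation paths keep every cleared slot pointing at the scratch page, while the suspend path writes entries whose valid bit is clear. The two call shapes as they appear in this revision (sketch only, parameter names as in the diff):

	/* Normal clear, e.g. unbinding an object or scrubbing GTT holes:
	 * entries stay valid but map the scratch page. */
	vm->clear_range(vm, first_entry, num_entries, true);

	/* Suspend path (i915_gem_suspend_gtt_mappings): entries are written
	 * with GEN6_PTE_VALID cleared, so nothing remains mapped. */
	vm->clear_range(vm, first_entry, num_entries, false);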
Line 605... Line 664...

 
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
-				  unsigned int num_entries)
+				  unsigned int num_entries,
+				  bool unused)
 {

Line 633... Line 693...

 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
 	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 				       entry,
-			      obj->base.size >> PAGE_SHIFT);
+				       obj->base.size >> PAGE_SHIFT,
+				       true);
 
 	obj->has_global_gtt_mapping = 0;

Line 720... Line 781...

 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
 		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 }
 
 static bool
Line 750... Line 811...

 	unsigned long gtt_size, mappable_size;
 
 	gtt_size = dev_priv->gtt.base.total;
 	mappable_size = dev_priv->gtt.mappable_end;
 
-#if 0
 	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
 		int ret;
 
 		if (INTEL_INFO(dev)->gen <= 7) {
 		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
 		 * aperture accordingly when using aliasing ppgtt. */
 			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 		}
 
-        i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
+		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 
 		ret = i915_gem_init_aliasing_ppgtt(dev);
 		if (!ret)
 			return;
 
 		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
 		drm_mm_takedown(&dev_priv->gtt.base.mm);
 		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
-}
-#endif
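
For context, this last hunk is KolibriOS-specific: Rev 4126 kept the aliasing-PPGTT branch behind #if 0 and reserved LFB_SIZE (presumably the linear-framebuffer carve-out) at the front of the aperture, while Rev 4280 enables the branch and hands the whole range to the allocator. A rough sketch of the layout difference, assuming i915_gem_setup_global_gtt(dev, start, mappable_end, end) manages [start, end) as in the upstream driver of that era:

	/* Assumed semantics, illustration only:
	 *   Rev 4126: allocator manages [LFB_SIZE, gtt_size - LFB_SIZE), keeping the
	 *             KolibriOS framebuffer region out of the drm_mm range
	 *   Rev 4280: allocator manages [0, gtt_size), matching upstream behaviour
	 */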