Subversion Repositories Kolibri OS

Rev 5139 → Rev 5354
Line 31... Line 31...
 #include "i915_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_trace.h"
 #include "intel_drv.h"

-/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
- * but keeps the logic simple. Indeed, the whole purpose of this macro is just
- * to give some inclination as to some of the magic values used in the various
- * workarounds!
- */
-#define CACHELINE_BYTES 64
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (!dev)
+		return false;
+
+	if (i915.enable_execlists) {
+		struct intel_context *dctx = ring->default_context;
+		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+		return ringbuf->obj;
+	} else
+		return ring->buffer && ring->buffer->obj;
+}

-static inline int __ring_space(int head, int tail, int size)
+int __intel_ring_space(int head, int tail, int size)
 {
 	int space = head - (tail + I915_RING_FREE_SPACE);
 	if (space < 0)
 		space += size;
 	return space;
 }

-static inline int ring_space(struct intel_ringbuffer *ringbuf)
+int intel_ring_space(struct intel_ringbuffer *ringbuf)
 {
-	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+	return __intel_ring_space(ringbuf->head & HEAD_ADDR,
+				  ringbuf->tail, ringbuf->size);
 }

-static bool intel_ring_stopped(struct intel_engine_cs *ring)
+bool intel_ring_stopped(struct intel_engine_cs *ring)
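
The __intel_ring_space() helper above (renamed from __ring_space, otherwise unchanged) is plain circular-buffer arithmetic: free space is the distance from tail forward to head, modulo the ring size, minus a small reserve that keeps a completely full ring distinguishable from an empty one. A standalone sketch of the same computation, with an illustrative reserve of 32 bytes (the driver's real I915_RING_FREE_SPACE constant is defined elsewhere):

    #include <assert.h>

    #define RING_FREE_SPACE 32  /* illustrative reserve; keeps head != tail on a full ring */

    /* Free bytes between tail (where the CPU writes) and head (where the GPU reads). */
    static int ring_space(int head, int tail, int size)
    {
        int space = head - (tail + RING_FREE_SPACE);
        if (space < 0)          /* the free region wraps past the end of the buffer */
            space += size;
        return space;
    }

    int main(void)
    {
        /* 4 KiB ring: GPU has consumed up to 0x100, CPU has filled up to 0xF00 */
        assert(ring_space(0x100, 0xF00, 0x1000) == 0x1E0);
        /* no wrap: head well ahead of tail */
        assert(ring_space(0x800, 0x100, 0x1000) == 0x6E0);
        return 0;
    }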

Line 349... Line 360...
 	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
 	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
 		/*
 		 * TLB invalidate requires a post-sync write.
 		 */
 		flags |= PIPE_CONTROL_QW_WRITE;
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
 		 * invalidate bit set. */
 		gen7_render_ring_cs_stall_wa(ring);

Line 431... Line 445...
 					     0);
 	if (ret)
 		return ret;
 	}

-	return gen8_emit_pipe_control(ring, flags, scratch_addr);
+	ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
+	if (ret)
+		return ret;
+
+	if (!invalidate_domains && flush_domains)
+		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
+
+	return 0;
 }

 static void ring_write_tail(struct intel_engine_cs *ring,

Line 474... Line 495...
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);

 	if (!IS_GEN2(ring->dev)) {
 		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+			/* Sometimes we observe that the idle flag is not
+			 * set even though the ring is empty. So double
+			 * check before giving up.
+			 */
+			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
 			return false;
 		}
 	}

Line 538... Line 564...
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+
+	/* WaClearRingBufHeadRegAtInit:ctg,elk */
+	if (I915_READ_HEAD(ring))
+		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
+			  ring->name, I915_READ_HEAD(ring));
+	I915_WRITE_HEAD(ring, 0);
+	(void)I915_READ_HEAD(ring);
+
 	I915_WRITE_CTL(ring,
 			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);


Line 556... Line 590...
 			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
 		ret = -EIO;
 		goto out;
 	}
-

 		ringbuf->head = I915_READ_HEAD(ring);
 		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);
 		ringbuf->last_retired_head = -1;

 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

 out:
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

 	return ret;
 }

+void
+intel_fini_pipe_control(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (ring->scratch.obj == NULL)
+		return;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	}
+
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
+}
+
-static int
+int

Line 594... Line 644...
 	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
 	if (ret)
 		goto err_unref;

 	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
-    ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW|0x100);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
 	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;

Line 612... Line 662...
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
 	return ret;
 }
+
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+				       struct intel_context *ctx)
+{
+	int ret, i;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *w = &dev_priv->workarounds;
+
+	if (WARN_ON(w->count == 0))
+		return 0;
+
+	ring->gpu_caches_dirty = true;
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(ring, (w->count * 2 + 2));
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	for (i = 0; i < w->count; i++) {
+		intel_ring_emit(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
+	}
+	intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	ring->gpu_caches_dirty = true;
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
+
+	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+	return 0;
+}
+
+static int wa_add(struct drm_i915_private *dev_priv,
+		  const u32 addr, const u32 mask, const u32 val)
+{
+	const u32 idx = dev_priv->workarounds.count;
+
+	if (WARN_ON(idx >= I915_MAX_WA_REGS))
+		return -ENOSPC;
+
+	dev_priv->workarounds.reg[idx].addr = addr;
+	dev_priv->workarounds.reg[idx].value = val;
+	dev_priv->workarounds.reg[idx].mask = mask;
+
+	dev_priv->workarounds.count++;
+
+	return 0;
+}
+
+#define WA_REG(addr, mask, val) { \
+		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+		if (r) \
+			return r; \
+	}
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
+static int bdw_init_workarounds(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* WaDisablePartialInstShootdown:bdw */
+	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+			  STALL_DOP_GATING_DISABLE);
+
+	/* WaDisableDopClockGating:bdw */
+	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+			  DOP_CLOCK_GATING_DISABLE);
+
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+			  GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+	/* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_NON_COHERENT |
+			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+	/* Wa4x4STCOptimizationDisable:bdw */
+	WA_SET_BIT_MASKED(CACHE_MODE_1,
+			  GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+			    GEN6_WIZ_HASHING_MASK,
+			    GEN6_WIZ_HASHING_16x4);
+
+	return 0;
+}
+
+static int chv_init_workarounds(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* WaDisablePartialInstShootdown:chv */
+	/* WaDisableThreadStallDopClockGating:chv */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+			  STALL_DOP_GATING_DISABLE);
+
+	/* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	/* WaForceEnableNonCoherent:chv */
+	/* WaHdcDisableFetchWhenMasked:chv */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_NON_COHERENT |
+			  HDC_DONOT_FETCH_MEM_WHEN_MASKED);
+
+	return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(ring->id != RCS);
+
+	dev_priv->workarounds.count = 0;
+
+	if (IS_BROADWELL(dev))
+		return bdw_init_workarounds(ring);
+
+	if (IS_CHERRYVIEW(dev))
+		return chv_init_workarounds(ring);
+
+	return 0;
+}

 static int init_render_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
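
Two details in the workaround block added above are worth spelling out. First, intel_ring_workarounds_emit() reserves w->count * 2 + 2 dwords: one MI_LOAD_REGISTER_IMM header, an (address, value) pair per register, and a trailing MI_NOOP that keeps the number of emitted dwords even. Second, WA_SET_BIT_MASKED()/WA_CLR_BIT_MASKED() rely on "masked" registers, where the upper 16 bits of a 32-bit write select which of the lower 16 bits take effect, so single bits can be flipped without a read-modify-write. A sketch of that encoding (paraphrasing the driver's _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE helpers; the bit position is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Masked registers: bits 31:16 are a write-enable mask for bits 15:0. */
    #define MASKED_BIT_ENABLE(a)   ((uint32_t)(((a) << 16) | (a)))  /* set bit(s) a   */
    #define MASKED_BIT_DISABLE(a)  ((uint32_t)((a) << 16))          /* clear bit(s) a */

    int main(void)
    {
        uint32_t bit = 1 << 8;  /* illustrative bit position */

        /* 0x01000100 sets bit 8, 0x01000000 clears it; bits whose mask
         * half is zero are left untouched by the hardware. */
        assert(MASKED_BIT_ENABLE(bit) == 0x01000100);
        assert(MASKED_BIT_DISABLE(bit) == 0x01000000);
        return 0;
    }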

Line 630... Line 844...
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
 	 *
 	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */

Line 646... Line 860...
 			I915_WRITE(GFX_MODE_GEN7,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

 	if (INTEL_INFO(dev)->gen >= 5) {
-		ret = init_pipe_control(ring);
+		ret = intel_init_pipe_control(ring);
 		if (ret)
 			return ret;
 	}

Line 667... Line 881...
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

 	if (HAS_L3_DPF(dev))
 		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

-	return ret;
+	return init_workarounds_ring(ring);
 }


Line 681... Line 895...
 		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
 		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
 		dev_priv->semaphore_obj = NULL;
 	}

-	if (ring->scratch.obj == NULL)
-		return;
-
-	if (INTEL_INFO(dev)->gen >= 5) {
-//		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_ggtt_unpin(ring->scratch.obj);
-	}
-
-	drm_gem_object_unreference(&ring->scratch.obj->base);
-	ring->scratch.obj = NULL;
+	intel_fini_pipe_control(ring);
 }

 static int gen8_rcs_signal(struct intel_engine_cs *signaller,

Line 1013... Line 1218...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;

-	if (!dev->irq_enabled)
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);

Line 1044... Line 1249...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;

-	if (!dev->irq_enabled)
+	if (!intel_irqs_enabled(dev_priv))
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);

Line 1081... Line 1286...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;

-	if (!dev->irq_enabled)
+	if (!intel_irqs_enabled(dev_priv))
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);

Line 1215... Line 1420...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;

-	if (!dev->irq_enabled)
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 	       return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);

Line 1258... Line 1463...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;

-	if (!dev->irq_enabled)
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);

Line 1278... Line 1483...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
-
-	if (!dev->irq_enabled)
-		return;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
 		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);

Line 1296... Line 1498...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;

-	if (!dev->irq_enabled)
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);

Line 1447... Line 1649...

 	obj = ring->status_page.obj;
 	if (obj == NULL)
 		return;

-//	kunmap(sg_page(obj->pages->sgl));
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);

Line 1495... Line 1697...

 		ring->status_page.obj = obj;
 	}

 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
-    ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

Line 1521... Line 1723...
     memset(ring->status_page.page_addr, 0, PAGE_SIZE);

     return 0;
 }

-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
-{
-	if (!ringbuf->obj)
-		return;
-
-	iounmap(ringbuf->virtual_start);
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	drm_gem_object_unreference(&ringbuf->obj->base);
-	ringbuf->obj = NULL;
-}
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+	iounmap(ringbuf->virtual_start);
+	ringbuf->virtual_start = NULL;
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+}

-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				      struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = ringbuf->obj;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret) {
+		i915_gem_object_ggtt_unpin(obj);
+		return ret;
+	}
+
+	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
+		i915_gem_object_ggtt_unpin(obj);
+		return -EINVAL;
+	}
+
+		return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
+}
+
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+			       struct intel_ringbuffer *ringbuf)
+{
 	struct drm_i915_gem_object *obj;
-	int ret;
-
-	if (ringbuf->obj)
-		return 0;

Line 1553... Line 1779...
 		return -ENOMEM;

 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
-
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		goto err_unref;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		goto err_unpin;
-
-	ringbuf->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-				ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		ret = -EINVAL;
-		goto err_unpin;
-	}

 	ringbuf->obj = obj;
-	return 0;

-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	return ret;
+	return 0;
 }

Line 1595... Line 1800...
 	}

 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->execlist_queue);
 	ringbuf->size = 32 * PAGE_SIZE;
+	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));


Line 1611... Line 1818...
 		ret = init_phys_status_page(ring);
 	if (ret)
 			goto error;
 	}

+	if (ringbuf->obj == NULL) {
 	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+			DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+					ring->name, ret);
+			goto error;
+		}
+
+		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+					ring->name, ret);
+			intel_destroy_ringbuffer_obj(ringbuf);
 		goto error;
+		}
 	}

 	/* Workaround an erratum on the i830 which causes a hang if
 	 * the TAIL pointer points to within the last 2 cachelines
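
The i830 erratum comment above is also the reason the (now deleted) DRI init path near the end of this diff sized the ring with ringbuf->effective_size -= 2 * CACHELINE_BYTES: on i830/845G the ring is treated as 128 bytes shorter than allocated so TAIL can never land in the last two cachelines. A minimal model of that bookkeeping:

    #include <assert.h>

    #define PAGE_SIZE       4096
    #define CACHELINE_BYTES 64   /* the macro this diff removes from the .c file */

    int main(void)
    {
        int size = 32 * PAGE_SIZE;   /* allocation */
        int effective_size = size;   /* what the wrap logic may actually use */

        /* i830/845G erratum: keep TAIL out of the final two cachelines */
        effective_size -= 2 * CACHELINE_BYTES;

        assert(effective_size == size - 128);
        return 0;
    }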

Line 1643... Line 1861...
 	return ret;
 }

 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct drm_i915_private *dev_priv;
+	struct intel_ringbuffer *ringbuf;

 	if (!intel_ring_initialized(ring))
 		return;

+	dev_priv = to_i915(ring->dev);
+	ringbuf = ring->buffer;
+
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

+	intel_unpin_ringbuffer_obj(ringbuf);

Line 1678... Line 1900...

 	if (ringbuf->last_retired_head != -1) {
 		ringbuf->head = ringbuf->last_retired_head;
 		ringbuf->last_retired_head = -1;

-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= n)
 			return 0;
 	}

 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
+		if (__intel_ring_space(request->tail, ringbuf->tail,
+				       ringbuf->size) >= n) {
 			seqno = request->seqno;

Line 1701... Line 1924...

 	i915_gem_retire_requests_ring(ring);
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;

-	ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);
 	return 0;
 }

Line 1730... Line 1953...
 	end = jiffies + 60 * HZ;

 	trace_i915_ring_wait_begin(ring);
 	do {
 		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
 		}
-

 		msleep(1);


Line 1771... Line 1993...
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);

 	ringbuf->tail = 0;
-	ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);

 	return 0;
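
The wrap path above converts the leftover byte count into dwords (rem /= 4) and fills them with MI_NOOP before tail restarts at zero, so the GPU only ever parses valid commands across the wrap. A standalone model of the padding (the ring is simulated with an array; MI_NOOP really is an all-zero dword, but treat the value as illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define MI_NOOP 0x00000000u

    int main(void)
    {
        uint32_t ring[1024];        /* 4 KiB ring modeled as dwords */
        int size = (int)sizeof(ring);
        int tail = size - 20;       /* 20 bytes left: too small for the next command */

        int rem = size - tail;      /* bytes between tail and the end of the ring */
        uint32_t *virt = &ring[tail / 4];
        rem /= 4;                   /* ...counted in 32-bit command dwords */
        while (rem--)
            *virt++ = MI_NOOP;      /* pad to the end of the ring */

        tail = 0;                   /* next command goes to the ring base */
        assert(ring[1019] == MI_NOOP && ring[1023] == MI_NOOP && tail == 0);
        return 0;
    }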

Line 1976... Line 2198...
 static int
 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 			      u64 offset, u32 len,
 			      unsigned flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
-		!(flags & I915_DISPATCH_SECURE);
+	bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
 	int ret;

 	ret = intel_ring_begin(ring, 4);
 	if (ret)

Line 2007... Line 2227...
 	ret = intel_ring_begin(ring, 2);
 	if (ret)
 		return ret;

 	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
-			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);

Line 2043... Line 2264...

 static int gen6_ring_flush(struct intel_engine_cs *ring,
 			  u32 invalidate, u32 flush)
 {
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;


Line 2073... Line 2295...
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	}
 	intel_ring_advance(ring);

-	if (IS_GEN7(dev) && !invalidate && flush)
+	if (!invalidate && flush) {
+		if (IS_GEN7(dev))
 		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+		else if (IS_BROADWELL(dev))
+			dev_priv->fbc.need_sw_cache_clean = true;
+	}

 	return 0;

Line 2107... Line 2333...
 					i915.semaphores = 0;
 				} else
 					dev_priv->semaphore_obj = obj;
 			}
 		}
+
+		ring->init_context = intel_ring_workarounds_emit;
 		ring->add_request = gen6_add_request;
 		ring->flush = gen8_render_ring_flush;
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

Line 2216... Line 2444...
 	}

 	return intel_init_ring_buffer(dev, ring);
 }
-
-#if 0
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
-
-	if (ringbuf == NULL) {
-		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-		if (!ringbuf)
-			return -ENOMEM;
-		ring->buffer = ringbuf;
-	}
-
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->mmio_base = RENDER_RING_BASE;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		/* non-kms not supported on gen6+ */
-		ret = -ENODEV;
-		goto err_ringbuf;
-	}
-
-	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
-	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
-	 * the special gen5 functions. */
-	ring->add_request = i9xx_add_request;
-	if (INTEL_INFO(dev)->gen < 4)
-		ring->flush = gen2_render_ring_flush;
-	else
-		ring->flush = gen4_render_ring_flush;
-	ring->get_seqno = ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
-	if (IS_GEN2(dev)) {
-		ring->irq_get = i8xx_ring_get_irq;
-		ring->irq_put = i8xx_ring_put_irq;
-	} else {
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
-	}
-	ring->irq_enable_mask = I915_USER_INTERRUPT;
-	ring->write_tail = ring_write_tail;
-	if (INTEL_INFO(dev)->gen >= 4)
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-	else if (IS_I830(dev) || IS_845G(dev))
-		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
-	else
-		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-	ring->init = init_render_ring;
-	ring->cleanup = render_ring_cleanup;
-
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-
-	ringbuf->size = size;
-	ringbuf->effective_size = ringbuf->size;
-	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
-	ringbuf->virtual_start = ioremap_wc(start, size);
-	if (ringbuf->virtual_start == NULL) {
-		DRM_ERROR("can not ioremap virtual address for"
-			  " ring buffer\n");
-		ret = -ENOMEM;
-		goto err_ringbuf;
-	}
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = init_phys_status_page(ring);
-		if (ret)
-			goto err_vstart;
-	}
-
-	return 0;
-
-err_vstart:
-	iounmap(ringbuf->virtual_start);
-err_ringbuf:
-	kfree(ringbuf);
-	ring->buffer = NULL;
-	return ret;
-}
-#endif

 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {