Subversion Repositories Kolibri OS

Rev 3037 → Rev 3243 ('-' lines exist only in Rev 3037, '+' lines only in Rev 3243, unprefixed lines are common to both)
Line 45... Line 45...
 	u32 gtt_offset;
 };
 
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
-	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
 	if (space < 0)
 		space += ring->size;
 	return space;
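The free-space computation above treats head and tail as byte offsets into a circular buffer: the reserved gap (the old literal 8, now the I915_RING_FREE_SPACE constant) keeps the tail from catching the head exactly, and a negative difference simply means the region wraps. In the driver, ring->head is first masked with HEAD_ADDR to strip the register's non-address bits. A minimal stand-alone sketch of the same arithmetic, using a hypothetical ring struct and made-up sizes rather than the driver's real types:

#include <stdio.h>

/* Illustrative stand-in for struct intel_ring_buffer; sizes are invented. */
struct toy_ring {
	unsigned int head;   /* byte offset the GPU has consumed up to */
	unsigned int tail;   /* byte offset the CPU will write at next */
	unsigned int size;   /* total ring size in bytes */
};

#define TOY_FREE_SPACE 8   /* placeholder for I915_RING_FREE_SPACE */

static int toy_ring_space(const struct toy_ring *ring)
{
	int space = (int)ring->head - (int)(ring->tail + TOY_FREE_SPACE);
	if (space < 0)          /* tail is "ahead" of head, so wrap around */
		space += ring->size;
	return space;
}

int main(void)
{
	struct toy_ring r = { .head = 256, .tail = 4000, .size = 4096 };
	/* 256 - (4000 + 8) = -3752, plus 4096 => 344 bytes still free */
	printf("free bytes: %d\n", toy_ring_space(&r));
	return 0;
}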
Line 245... Line 245...
 		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 		/*
 		 * TLB invalidate requires a post-sync write.
 		 */
-		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
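This hunk only changes which bits end up in the flags dword of the PIPE_CONTROL packet: because the TLB invalidations ask for a post-sync QWord write, Rev 3243 also sets the CS-stall bit next to it. A rough, self-contained sketch of assembling such a 4-dword packet into a buffer; every constant below is an invented placeholder for illustration, not the real i915 bit encoding:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions -- illustrative only, not the driver's definitions. */
#define PC_TLB_INVALIDATE   (1u << 0)
#define PC_QW_WRITE         (1u << 1)
#define PC_CS_STALL         (1u << 2)
#define PC_GLOBAL_GTT       (1u << 3)
#define PC_OPCODE_LEN4      0x7a000002u   /* stand-in for GFX_OP_PIPE_CONTROL(4) */

int main(void)
{
	uint32_t pkt[4];
	uint32_t flags = PC_TLB_INVALIDATE;

	/* A TLB invalidate wants a post-sync write; Rev 3243 adds the CS stall too. */
	flags |= PC_QW_WRITE | PC_CS_STALL;

	pkt[0] = PC_OPCODE_LEN4;            /* command + length */
	pkt[1] = flags;                     /* what to flush/invalidate */
	pkt[2] = 0x1000 | PC_GLOBAL_GTT;    /* scratch address for the post-sync write */
	pkt[3] = 0;                         /* immediate data */

	for (int i = 0; i < 4; i++)
		printf("dw%d = 0x%08x\n", i, pkt[i]);
	return 0;
}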
Line 252... Line 252...
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
Line 459... Line 459...
 	ret = i915_gem_object_pin(obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-    pc->cpu_page =  (void*)MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW);
+	pc->cpu_page =  (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
 	if (pc->cpu_page == NULL)
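The functional change here is only how the first backing page of the pinned object is located: judging by the new call, the object's pages are now described through a scatter-gather table rather than a flat page array, so the code takes the page behind the first scatterlist entry (sg_page(obj->pages->sgl)) before mapping it with MapIoMem. A toy model of the two layouts, with simplified stand-in structs instead of the kernel's real ones:

#include <stdio.h>

struct toy_page { unsigned long pfn; };

/* Old layout: the object carried a flat array of page pointers. */
struct toy_pages_array { struct toy_page *page[4]; };

/* New layout: a scatter-gather table; each entry points at a page. */
struct toy_scatterlist { struct toy_page *pg; struct toy_scatterlist *next; };
struct toy_sg_table { struct toy_scatterlist *sgl; };

static struct toy_page *first_page_old(struct toy_pages_array *pages)
{
	return pages->page[0];              /* like obj->pages.page[0] in Rev 3037 */
}

static struct toy_page *first_page_new(struct toy_sg_table *pages)
{
	return pages->sgl->pg;              /* like sg_page(obj->pages->sgl) in Rev 3243 */
}

int main(void)
{
	struct toy_page p = { .pfn = 0x1234 };
	struct toy_pages_array old_pages = { .page = { &p } };
	struct toy_scatterlist sl = { .pg = &p, .next = NULL };
	struct toy_sg_table new_pages = { .sgl = &sl };

	printf("old: pfn %lx, new: pfn %lx\n",
	       first_page_old(&old_pages)->pfn, first_page_new(&new_pages)->pfn);
	return 0;
}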
Line 466... Line 466...
 		goto err_unpin;
 
Line 500... Line 500...
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
Line 504... Line 504...
 
-	if (INTEL_INFO(dev)->gen > 3) {
+	if (INTEL_INFO(dev)->gen > 3)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 */
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	/* Required for the hardware to program scanline values for waiting */
+	if (INTEL_INFO(dev)->gen == 6)
+		I915_WRITE(GFX_MODE,
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
 		if (IS_GEN7(dev))
 			I915_WRITE(GFX_MODE_GEN7,
 				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
 				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-	}
 
 	if (INTEL_INFO(dev)->gen >= 5) {
 		ret = init_pipe_control(ring);
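Both revisions program these mode registers through masked writes: in the i915 driver the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers place the affected bit into the upper 16 bits of the written value (a per-bit write-enable mask) and the desired value into the lower 16 bits, so one 32-bit write can flip a single bit without disturbing its neighbours. A small stand-alone illustration of that encoding; the macro shapes follow the upstream pattern and the bit positions are placeholders, so treat the exact forms as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the i915 helpers (assumed): mask in bits 31:16, value in bits 15:0. */
#define MASKED_BIT_ENABLE(a)  ((((uint32_t)(a)) << 16) | (a))
#define MASKED_BIT_DISABLE(a) (((uint32_t)(a)) << 16)

#define TOY_GFX_REPLAY_MODE            (1u << 0)   /* placeholder bit positions */
#define TOY_GFX_TLB_INVALIDATE_ALWAYS  (1u << 13)

int main(void)
{
	/* One write: clear TLB_INVALIDATE_ALWAYS, set REPLAY_MODE, leave other bits alone. */
	uint32_t v = MASKED_BIT_DISABLE(TOY_GFX_TLB_INVALIDATE_ALWAYS) |
		     MASKED_BIT_ENABLE(TOY_GFX_REPLAY_MODE);

	printf("write 0x%08x: mask 0x%04x, value 0x%04x\n",
	       v, v >> 16, v & 0xffff);
	return 0;
}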
Line 550... Line 562...
 	cleanup_pipe_control(ring);
 }
 
 static void
 update_mboxes(struct intel_ring_buffer *ring,
-	    u32 seqno,
 	    u32 mmio_offset)
 {
-	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
-			      MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_REGISTER |
-			MI_SEMAPHORE_UPDATE);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 }
 
 /**
Line 571... Line 579...
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring,
-		 u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
 {
 	u32 mbox1_reg;
 	u32 mbox2_reg;
 	int ret;
Line 585... Line 592...
 		return ret;
 
 	mbox1_reg = ring->signal_mbox[0];
 	mbox2_reg = ring->signal_mbox[1];
-
-	*seqno = i915_gem_next_request_seqno(ring);
 
-	update_mboxes(ring, *seqno, mbox1_reg);
-	update_mboxes(ring, *seqno, mbox2_reg);
+	update_mboxes(ring, mbox1_reg);
+	update_mboxes(ring, mbox2_reg);
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, *seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
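Two related changes run through this hunk: the request's sequence number is no longer computed inside add_request and handed back through a pointer, it is the pre-allocated ring->outstanding_lazy_request; and the mailbox update in the other rings is now a one-register MI_LOAD_REGISTER_IMM write instead of the MI_SEMAPHORE_MBOX form. A toy command-stream writer showing the shape of the new three-dword sequence; the opcode value and register offsets are invented placeholders, not the hardware encodings:

#include <stdint.h>
#include <stdio.h>

#define TOY_LOAD_REGISTER_IMM_1  0x11000001u  /* placeholder for MI_LOAD_REGISTER_IMM(1) */

struct toy_ring {
	uint32_t buf[64];
	unsigned int tail;                 /* next dword index to write */
	uint32_t outstanding_lazy_request; /* seqno reserved before emitting */
};

static void toy_emit(struct toy_ring *ring, uint32_t dw)
{
	ring->buf[ring->tail++] = dw;
}

/* Mirrors the new update_mboxes(): LRI, mailbox register, current seqno. */
static void toy_update_mbox(struct toy_ring *ring, uint32_t mmio_offset)
{
	toy_emit(ring, TOY_LOAD_REGISTER_IMM_1);
	toy_emit(ring, mmio_offset);
	toy_emit(ring, ring->outstanding_lazy_request);
}

int main(void)
{
	struct toy_ring r = { .tail = 0, .outstanding_lazy_request = 42 };

	toy_update_mbox(&r, 0x2040);    /* hypothetical mailbox register offsets */
	toy_update_mbox(&r, 0x12040);

	for (unsigned int i = 0; i < r.tail; i++)
		printf("dw%u = 0x%08x\n", i, r.buf[i]);
	return 0;
}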
Line 648... Line 653...
 	intel_ring_emit(ring__, 0);							\
 	intel_ring_emit(ring__, 0);							\
 } while (0)
 
 static int
-pc_render_add_request(struct intel_ring_buffer *ring,
-		      u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	u32 seqno = i915_gem_next_request_seqno(ring);
 	struct pipe_control *pc = ring->private;
 	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
Line 672... Line 675...
 
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
Line 691... Line 694...
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
Line 883... Line 885...
 	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
-i9xx_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
 {
-	u32 seqno;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	seqno = i915_gem_next_request_seqno(ring);
-
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	*result = seqno;
Line 959... Line 956...
 
 		gen6_gt_force_wake_put(dev_priv);
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 length,
+			 unsigned flags)
 {
 	int ret;
 
 	ret = intel_ring_begin(ring, 2);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
-			MI_BATCH_NON_SECURE_I965);
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
 
 	return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-				u32 offset, u32 len)
+				u32 offset, u32 len,
+				unsigned flags)
 {
 	int ret;
 
+	if (flags & I915_DISPATCH_PINNED) {
 		ret = intel_ring_begin(ring, 4);
 		if (ret)
 			return ret;
 
 		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	} else {
+		struct drm_i915_gem_object *obj = ring->private;
+		u32 cs_offset = obj->gtt_offset;
+
+		if (len > I830_BATCH_LIMIT)
+			return -ENOSPC;
+
+		ret = intel_ring_begin(ring, 9+3);
+		if (ret)
+			return ret;
+		/* Blit the batch (which has now all relocs applied) to the stable batch
+		 * scratch bo area (so that the CS never stumbles over its tlb
+		 * invalidation bug) ... */
+		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+		intel_ring_emit(ring, MI_FLUSH);
+
+		/* ... and execute it. */
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, cs_offset + len - 8);
 	intel_ring_advance(ring);
+	}
 
 	return 0;
 }
 
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-				u32 offset, u32 len)
+			 u32 offset, u32 len,
+			 unsigned flags)
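The new else-branch works around the i830 CS TLB bug by first blitting the batch into a stable scratch buffer (capped at I830_BATCH_LIMIT) and only executing it from there. The blit is described as rows of one page: the dword (DIV_ROUND_UP(len, 4096) << 16) | 1024 packs the row count into the upper half and a 1024-unit row width (4096 bytes at the 32-bit depth selected in the previous dword, on my reading) into the lower half. A quick stand-alone check of that packing, with DIV_ROUND_UP spelled out the usual way and a hypothetical batch length:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t len = 150000;                        /* hypothetical batch length in bytes */
	uint32_t rows = DIV_ROUND_UP(len, 4096);      /* 4096-byte rows the blit must copy */
	uint32_t dw = (rows << 16) | 1024;            /* height in rows, width in the lower half */

	printf("len=%u -> rows=%u, size dword=0x%08x (copies %u bytes)\n",
	       len, rows, dw, rows * 4096);
	return 0;
}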
Line 1048... Line 1083...
 	if (ret != 0) {
 		goto err_unref;
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-    ring->status_page.page_addr = (void*)MapIoMem(obj->pages.page[0],4096,PG_SW);
+    ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}
Line 1070... Line 1105...
 	drm_gem_object_unreference(&obj->base);
 err:
 	return ret;
 }
+
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
+    u32 addr;
+
+    if (!dev_priv->status_page_dmah) {
+        dev_priv->status_page_dmah =
+            drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+        if (!dev_priv->status_page_dmah)
+            return -ENOMEM;
+    }
+
+    addr = dev_priv->status_page_dmah->busaddr;
+    if (INTEL_INFO(ring->dev)->gen >= 4)
+        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+    I915_WRITE(HWS_PGA, addr);
+
+    ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+    memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+    return 0;
+}
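For rings without a GTT-backed status page, the newly added init_phys_hws_pga() points HWS_PGA at a DMA-coherent page. The bus address is page-aligned, so its low 12 bits are free for flags, and on gen4+ the line addr |= (busaddr >> 28) & 0xf0 folds address bits 35:32 into register bits 7:4 so a page above 4 GiB can still be described; that is my reading of the shown expression, the hardware meaning of the field is taken on trust. The arithmetic itself is easy to check stand-alone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical page-aligned bus address just above 4 GiB. */
	uint64_t busaddr = 0x2abcd0000ULL;

	uint32_t addr = (uint32_t)busaddr;            /* low 32 bits; low 12 bits are zero */
	addr |= (uint32_t)((busaddr >> 28) & 0xf0);   /* bits 35:32 land in bits 7:4 */

	printf("busaddr=0x%llx -> HWS_PGA value 0x%08x (high-address nibble 0x%x)\n",
	       (unsigned long long)busaddr, addr, (addr >> 4) & 0xf);
	return 0;
}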
 
 static int intel_init_ring_buffer(struct drm_device *dev,
 			   struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;
Line 1081... Line 1139...
 
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	ring->size = 32 * PAGE_SIZE;
+	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
 	if (I915_NEED_GFX_HWS(dev)) {
        ret = init_status_page(ring);
        if (ret)
            return ret;
+	} else {
+		BUG_ON(ring->id != RCS);
+		ret = init_phys_hws_pga(ring);
+		if (ret)
+			return ret;
 	}
Line 1152... Line 1216...
 	if (ring->obj == NULL)
 		return;
 
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
-	ret = intel_wait_ring_idle(ring);
+	ret = intel_ring_idle(ring);
 	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
Line 1171... Line 1235...
 		ring->cleanup(ring);
 
 //   cleanup_status_page(ring);
Line 1174... Line -...
 }
-
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
-	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
-
-	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(ring, rem);
-		if (ret)
-			return ret;
-	}
-
-	virt = ring->virtual_start + ring->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ring->tail = 0;
-	ring->space = ring_space(ring);
-
-    return 0;
-}
 
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
Line 1199... Line 1241...
 {
Line 1226... Line 1268...
 		int space;
 
 		if (request->tail == -1)
 			continue;
 
-		space = request->tail - (ring->tail + 8);
+		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
 		if (space < 0)
 			space += ring->size;
 		if (space >= n) {
Line 1261... Line 1303...
 		return -ENOSPC;
 
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
 	int ret;
 
 	ret = intel_ring_wait_request(ring, n);
 	if (ret != -ENOSPC)
 		return ret;
 
-
+	trace_i915_ring_wait_begin(ring);
 	/* With GEM the hangcheck timer should kick us out of the loop,
Line 1298... Line 1340...
     } while (!time_after(GetTimerTicks(), end));
 	trace_i915_ring_wait_end(ring);
 	return -EBUSY;
 }
+
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+	uint32_t __iomem *virt;
+	int rem = ring->size - ring->tail;
+
+	if (ring->space < rem) {
+		int ret = ring_wait_for_space(ring, rem);
+		if (ret)
+			return ret;
+	}
+
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ring->tail = 0;
+	ring->space = ring_space(ring);
+
+	return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	int ret;
+
+	/* We need to add any requests required to flush the objects and ring */
+	if (ring->outstanding_lazy_request) {
+		ret = i915_add_request(ring, NULL, NULL);
+		if (ret)
+			return ret;
+	}
+
+	/* Wait upon the last request to be completed */
+	if (list_empty(&ring->request_list))
+		return 0;
+
+	seqno = list_entry(ring->request_list.prev,
+			   struct drm_i915_gem_request,
+			   list)->seqno;
+
+	return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+	if (ring->outstanding_lazy_request)
+		return 0;
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
 
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
Line 1309... Line 1405...
 
 	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
 	if (ret)
+		return ret;
+
+	/* Preallocate the olr before touching the ring */
+	ret = intel_ring_alloc_seqno(ring);
+	if (ret)
 		return ret;
 
 	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (unlikely(ring->space < n)) {
-		ret = intel_wait_ring_buffer(ring, n);
+		ret = ring_wait_for_space(ring, n);
 		if (unlikely(ret))
Line 1380... Line 1481...
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	/*
+	 * Bspec vol 1c.5 - video engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
 	if (invalidate & I915_GEM_GPU_DOMAINS)
-		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
Line 1393... Line 1501...
 }
+
+static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
 
 static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			      u32 offset, u32 len)
+			      u32 offset, u32 len,
+			      unsigned flags)
 {
        int ret;
 
        ret = intel_ring_begin(ring, 2);
        if (ret)
 	       return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
Line 1421... Line 1553...
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	/*
+	 * Bspec vol 1c.3 - blitter engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
-		cmd |= MI_INVALIDATE_TLB;
+		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+			MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
Line 1479... Line 1618...
 			ring->irq_put = i9xx_ring_put_irq;
 		}
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (IS_HASWELL(dev))
+		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 	else if (IS_I830(dev) || IS_845G(dev))
 		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
 	else
 		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
+	/* Workaround batchbuffer to combat CS tlb bug. */
+	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct drm_i915_gem_object *obj;
+		int ret;
 
-	if (!I915_NEED_GFX_HWS(dev)) {
-		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-	}
+		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate batch bo\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_object_pin(obj, 0, true, false);
+		if (ret != 0) {
+			drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to ping batch bo\n");
+			return ret;
+		}
+
+		ring->private = obj;
+	}
Line -... Line 1656...
-
 
1656
 
-
 
1657
	return intel_init_ring_buffer(dev, ring);
-
 
1658
}
-
 
1659
 
-
 
1660
#if 0
-
 
1661
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-
 
1662
{
-
 
1663
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
1664
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-
 
1665
	int ret;
-
 
1666
 
-
 
1667
	ring->name = "render ring";
-
 
1668
	ring->id = RCS;
-
 
1669
	ring->mmio_base = RENDER_RING_BASE;
-
 
1670
 
-
 
1671
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
1672
		/* non-kms not supported on gen6+ */
-
 
1673
		return -ENODEV;
-
 
1674
	}
-
 
1675
 
-
 
1676
	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
-
 
1677
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
-
 
1678
	 * the special gen5 functions. */
-
 
1679
	ring->add_request = i9xx_add_request;
-
 
1680
	if (INTEL_INFO(dev)->gen < 4)
-
 
1681
		ring->flush = gen2_render_ring_flush;
-
 
1682
	else
-
 
1683
		ring->flush = gen4_render_ring_flush;
-
 
1684
	ring->get_seqno = ring_get_seqno;
-
 
1685
	if (IS_GEN2(dev)) {
-
 
1686
		ring->irq_get = i8xx_ring_get_irq;
-
 
1687
		ring->irq_put = i8xx_ring_put_irq;
-
 
1688
	} else {
-
 
1689
		ring->irq_get = i9xx_ring_get_irq;
-
 
1690
		ring->irq_put = i9xx_ring_put_irq;
-
 
1691
	}
-
 
1692
	ring->irq_enable_mask = I915_USER_INTERRUPT;
-
 
1693
	ring->write_tail = ring_write_tail;
-
 
1694
	if (INTEL_INFO(dev)->gen >= 4)
-
 
1695
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-
 
1696
	else if (IS_I830(dev) || IS_845G(dev))
-
 
1697
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
-
 
1698
	else
-
 
1699
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-
 
1700
	ring->init = init_render_ring;
-
 
1701
	ring->cleanup = render_ring_cleanup;
-
 
1702
 
-
 
1703
	ring->dev = dev;
-
 
1704
	INIT_LIST_HEAD(&ring->active_list);
-
 
1705
	INIT_LIST_HEAD(&ring->request_list);
-
 
1706
 
-
 
1707
	ring->size = size;
-
 
1708
	ring->effective_size = ring->size;
-
 
1709
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-
 
1710
		ring->effective_size -= 128;
-
 
1711
 
-
 
1712
	ring->virtual_start = ioremap_wc(start, size);
-
 
1713
	if (ring->virtual_start == NULL) {
-
 
1714
		DRM_ERROR("can not ioremap virtual address for"
-
 
1715
			  " ring buffer\n");
-
 
1716
		return -ENOMEM;
-
 
1717
	}
-
 
1718
 
-
 
1719
	if (!I915_NEED_GFX_HWS(dev)) {
-
 
1720
		ret = init_phys_hws_pga(ring);
-
 
1721
		if (ret)
-
 
1722
			return ret;
-
 
1723
	}
Line 1500... Line 1724...
1500
 
1724
 
1501
	return intel_init_ring_buffer(dev, ring);
1725
	return 0;
1502
}
1726
}
1503
 
1727
#endif
Line 1545... Line 1769...
1545
		}
1769
		}
1546
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1770
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1547
	}
1771
	}
1548
	ring->init = init_ring_common;
1772
	ring->init = init_ring_common;
Line 1549... Line -...
1549
 
-
 
1550
 
1773
 
1551
	return intel_init_ring_buffer(dev, ring);
1774
	return intel_init_ring_buffer(dev, ring);
Line 1552... Line 1775...
1552
}
1775
}
1553
 
1776