Subversion Repositories Kolibri OS

--- Rev 2339
+++ Rev 2340

Line 145... Line 145...
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj = ring->obj;
 	u32 head;
 
-    ENTER();
-
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
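Note: this hunk and several below remove the ENTER()/LEAVE() trace calls from the ring-init paths, while later hunks add them to the wait and wrap paths instead. The macros are not defined in this file; in KolibriOS driver ports they are conventionally thin dbgprintf() wrappers, roughly as below. The exact definitions are an assumption here, not part of this diff.

    /* assumed shape of the KolibriOS trace macros used throughout this diff */
    #define ENTER()  dbgprintf("enter %s\n", __FUNCTION__)
    #define LEAVE()  dbgprintf("leave %s\n", __FUNCTION__)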
Line 201... Line 199...
 
     ring->head = I915_READ_HEAD(ring);
     ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
     ring->space = ring_space(ring);
 
-    LEAVE();
 
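For reference, ring_space(), which this hunk calls, lives earlier in the file and is untouched by the diff. Reconstructed from the same-era Linux i915 source as a sketch:

    static int ring_space(struct intel_ring_buffer *ring)
    {
        /* free bytes between head and tail, wrapped into [0, size) */
        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
        if (space < 0)
            space += ring->size;
        return space;
    }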
Line 283... Line 280...
 
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-
-    ENTER();
-
 	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
Line 306... Line 300...
 		ret = init_pipe_control(ring);
 		if (ret)
 			return ret;
 	}
-
-    LEAVE();
 
 	return ret;
 }
 
Line 559... Line 551...
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
+#endif
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
Line 590... Line 583...
 	}
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
 }
-#endif
 
 static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
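This hunk pairs with the previous one: the #endif that used to follow intel_ring_setup_status_page() now precedes it, so the function moves out of the surrounding #if 0 block and is compiled again. Schematically (not literal file content):

    /* Rev 2339:                            Rev 2340:
     *   #if 0                                #if 0
     *     ...irq helpers...                    ...irq helpers...
     *     intel_ring_setup_status_page()    #endif
     *   #endif                              intel_ring_setup_status_page()
     */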
Line 704... Line 696...
 		else
 			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
+#endif
 
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
Line 799... Line 792...
 	if (ret != 0) {
 		goto err_unref;
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-	ring->status_page.page_addr = kmap(obj->pages[0]);
+    ring->status_page.page_addr = MapIoMem(obj->pages[0], 4096, PG_SW);
 	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
 	}
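The functional change here is the KolibriOS replacement for kmap(): the port has no struct page machinery, and obj->pages[] holds physical addresses, so the status page is mapped directly with the kernel's MapIoMem(). A minimal sketch of the idea, assuming MapIoMem(phys, size, flags) returns a kernel virtual mapping and PG_SW requests a writable one:

    /* hypothetical shim illustrating the substitution */
    static inline void *kmap_phys(addr_t phys_page)
    {
        return MapIoMem(phys_page, 4096, PG_SW);  /* map one 4 KiB page, writable */
    }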
Line 814... Line 807...
 			ring->name, ring->status_page.gfx_addr);
 
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+ //  i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(&obj->base);
+ //  drm_gem_object_unreference(&obj->base);
 err:
 	return ret;
 }
-#endif
 
 int intel_init_ring_buffer(struct drm_device *dev,
 			   struct intel_ring_buffer *ring)
 {
-    struct drm_i915_gem_object *obj=NULL;
+	struct drm_i915_gem_object *obj;
 	int ret;
-    ENTER();
+
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 //   init_waitqueue_head(&ring->irq_queue);
 //   spin_lock_init(&ring->irq_lock);
     ring->irq_mask = ~0;
 
 	if (I915_NEED_GFX_HWS(dev)) {
-//       ret = init_status_page(ring);
-//       if (ret)
+       ret = init_status_page(ring);
+       if (ret)
884
	 * of the buffer.
876
	 * of the buffer.
885
	 */
877
	 */
886
	ring->effective_size = ring->size;
878
	ring->effective_size = ring->size;
887
	if (IS_I830(ring->dev))
879
	if (IS_I830(ring->dev))
888
		ring->effective_size -= 128;
880
		ring->effective_size -= 128;
889
    LEAVE();
881
 
890
	return 0;
882
	return 0;
Line 891... Line 883...
891
 
883
 
892
err_unmap:
884
err_unmap:
893
//   drm_core_ioremapfree(&ring->map, dev);
885
//   drm_core_ioremapfree(&ring->map, dev);
Line 934... Line 926...
934
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
926
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
935
{
927
{
936
	unsigned int *virt;
928
	unsigned int *virt;
937
	int rem = ring->size - ring->tail;
929
	int rem = ring->size - ring->tail;
Line -... Line 930...
-
 
930
 
-
 
931
    ENTER();
938
 
932
 
939
	if (ring->space < rem) {
933
	if (ring->space < rem) {
940
		int ret = intel_wait_ring_buffer(ring, rem);
934
		int ret = intel_wait_ring_buffer(ring, rem);
941
		if (ret)
935
		if (ret)
942
			return ret;
936
			return ret;
Line 950... Line 944...
 	}
 
 	ring->tail = 0;
 	ring->space = ring_space(ring);
 
+    LEAVE();
 	return 0;
 }
 
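The unchanged middle of intel_wrap_ring_buffer() (old lines 943-949, elided by the diff) fills the dead space at the end of the ring with MI_NOOP before the tail wraps to zero, roughly as below. This is a sketch from the same-era source; the name of the CPU-mapping field is an assumption and varies between versions:

    virt = (unsigned int *)(ring->map.handle + ring->tail);  /* assumed field */
    rem /= 4;                 /* bytes -> dwords */
    while (rem--)
        *virt++ = MI_NOOP;    /* so the GPU never executes stale data */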
 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
 	u32 head;
 
+    ENTER();
+
 	/* If the reported head position has wrapped or hasn't advanced,
 	 * fallback to the slow and accurate path.
 	 */
 	head = intel_read_status_page(ring, 4);
 	if (head > ring->head) {
 		ring->head = head;
 		ring->space = ring_space(ring);
 		if (ring->space >= n)
+        {
+            LEAVE();
 			return 0;
+        };
 	}
 
 //   trace_i915_ring_wait_begin(ring);
 	end = jiffies + 3 * HZ;
 	do {
 		ring->head = I915_READ_HEAD(ring);
 		ring->space = ring_space(ring);
 		if (ring->space >= n) {
 //           trace_i915_ring_wait_end(ring);
+            LEAVE();
 			return 0;
 		}
 
-		if (dev->primary->master) {
-			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-		}
-
 		msleep(1);
 		if (atomic_read(&dev_priv->mm.wedged))
+        {
+            LEAVE();
 			return -EAGAIN;
+        };
 	} while (!time_after(jiffies, end));
 //   trace_i915_ring_wait_end(ring);
+    LEAVE();
+
 	return -EBUSY;
 }
Line 999... Line 999...
 
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	int n = 4*num_dwords;
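intel_ring_begin() itself is unchanged (only its opening appears here as context), but it is the glue between the two functions above: in the same-era source the remainder of its body wraps and waits as needed before reserving n bytes. Roughly, as a sketch:

    if (unlikely(ring->tail + n > ring->effective_size)) {
        ret = intel_wrap_ring_buffer(ring);   /* pad and wrap the tail */
        if (unlikely(ret))
            return ret;
    }

    if (unlikely(ring->space < n)) {
        ret = intel_wait_ring_buffer(ring, n);  /* poll until space frees up */
        if (unlikely(ret))
            return ret;
    }

    ring->space -= n;
    return 0;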
Line 1039... Line 1039...
     .flush          = render_ring_flush,
     .add_request        = render_ring_add_request,
 //   .get_seqno      = ring_get_seqno,
 //   .irq_get        = render_ring_get_irq,
 //   .irq_put        = render_ring_put_irq,
-//   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
+   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
 //       .cleanup            = render_ring_cleanup,
 };
 
Line 1057... Line 1057...
     .flush          = bsd_ring_flush,
     .add_request        = ring_add_request,
 //   .get_seqno      = ring_get_seqno,
 //   .irq_get        = bsd_ring_get_irq,
 //   .irq_put        = bsd_ring_put_irq,
-//   .dispatch_execbuffer    = ring_dispatch_execbuffer,
+   .dispatch_execbuffer    = ring_dispatch_execbuffer,
 };
 
 
Line 1104... Line 1104...
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
 
-#if 0
 static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			      u32 offset, u32 len)
 {
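Removing this #if 0, together with the new one inserted after the function in the next hunk, shrinks the disabled region so that gen6_ring_dispatch_execbuffer() is compiled again and the gen6 ring descriptors below can point .dispatch_execbuffer at it. Its body, whose tail shows up as context in the next hunk, reads roughly like this in the same-era source (a sketch, not literal file content):

    static int
    gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                  u32 offset, u32 len)
    {
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
            return ret;

        /* bits 0-7 carry dispatch flags; gen6+ takes the batch length
         * from the batch buffer itself */
        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
    }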
Line 1123... Line 1122...
        intel_ring_advance(ring);
 
        return 0;
 }
 
+#if 0
+
 static bool
 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
 {
1131
{
Line 1170... Line 1171...
1170
    .flush          = gen6_ring_flush,
1171
    .flush          = gen6_ring_flush,
1171
    .add_request        = gen6_add_request,
1172
    .add_request        = gen6_add_request,
1172
//   .get_seqno      = ring_get_seqno,
1173
//   .get_seqno      = ring_get_seqno,
1173
//   .irq_get        = gen6_bsd_ring_get_irq,
1174
//   .irq_get        = gen6_bsd_ring_get_irq,
1174
//   .irq_put        = gen6_bsd_ring_put_irq,
1175
//   .irq_put        = gen6_bsd_ring_put_irq,
1175
//   .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1176
   .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1176
};
1177
};
Line 1177... Line 1178...
1177
 
1178
 
1178
#if 0
1179
#if 0
Line 1302... Line 1303...
        .flush          = blt_ring_flush,
        .add_request        = gen6_add_request,
 //       .get_seqno      = ring_get_seqno,
 //       .irq_get            = blt_ring_get_irq,
 //       .irq_put            = blt_ring_put_irq,
-//       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
+       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
 //       .cleanup            = blt_ring_cleanup,
 };
Line 1310... Line 1311...
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-    ENTER();
+
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
        ring->add_request = gen6_add_request;
 //       ring->irq_get = gen6_render_ring_get_irq;
Line 1325... Line 1326...
 
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 	}
-    LEAVE();
+
 	return intel_init_ring_buffer(dev, ring);
 }
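For context on how these constructors are reached: in the same-era code the GEM init path brings the rings up in order, render ring first. A sketch of the caller, reconstructed with the usual error unwinding elided:

    ret = intel_init_render_ring_buffer(dev);
    if (ret)
        return ret;

    if (HAS_BSD(dev)) {
        ret = intel_init_bsd_ring_buffer(dev);
        if (ret)
            goto cleanup_render_ring;
    }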