Subversion Repositories: KolibriOS


Rev 2351 → Rev 2352. Lines prefixed '-' appear only in Rev 2351, lines prefixed '+' only in Rev 2352; unprefixed lines are shared context.
Line 125... Line 125...
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file);
 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
-				    struct shrink_control *sc);
+//static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+//                   struct shrink_control *sc);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
Line 188... Line 188...
 		return ret;
 
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
+#endif
 
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
 	return obj->gtt_space && !obj->active && obj->pin_count == 0;
 }
 
-#endif
-
 void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long start,
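Note: the hunk above does not touch i915_gem_object_is_inactive() itself; it only moves the #endif of an enclosing #if 0 block upward, so the helper is compiled again (the request-retire path enabled later in this revision calls it). A minimal standalone sketch of that preprocessor effect (the helper names here are illustrative, not from the driver):

#include <stdio.h>

#if 0
static int disabled_helper(void) { return 1; }   /* excluded from the build */
#endif                      /* moving this line up re-enables what follows */

static int enabled_helper(void) { return 2; }    /* compiled and callable */

int main(void)
{
    printf("%d\n", enabled_helper());
    return 0;
}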
Line 778... Line 777...
 
 	return 0;
 
 err_pages:
     while (i--)
-        FreePage(obj->pages[i]);
+        FreePage((addr_t)obj->pages[i]);
 
     free(obj->pages);
Line 800... Line 799...
 
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
 	for (i = 0; i < page_count; i++) {
-        FreePage(obj->pages[i]);
+        FreePage((addr_t)obj->pages[i]);
 	}
 	obj->dirty = 0;
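Note: this hunk and the one above make the same fix, adding an (addr_t) cast to the FreePage() argument. The KolibriOS page allocator takes a raw address rather than a typed pointer, so the cast resolves the pointer-to-integer type mismatch. A standalone model of the idiom (addr_t and FreePage below are stand-ins defined here only so the sketch compiles; the real ones are KolibriOS kernel primitives):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uintptr_t addr_t;             /* stand-in for the kernel's address type */

static void FreePage(addr_t addr)     /* stand-in: takes an address, not a */
{                                     /* typed pointer, like the kernel call */
    free((void *)addr);
}

int main(void)
{
    int i, page_count = 4;
    void **pages = malloc(page_count * sizeof(*pages));

    for (i = 0; i < page_count; i++)
        pages[i] = malloc(4096);

    /* the error-path unwind from the first hunk: free what was allocated */
    i = page_count;
    while (i--)
        FreePage((addr_t)pages[i]);   /* explicit cast, as added in Rev 2352 */

    free(pages);
    puts("all pages freed");
    return 0;
}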
Line 862... Line 861...
 	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
 
 	i915_gem_object_move_off_active(obj);
 }
 
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (obj->pin_count != 0)
+		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+	else
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	BUG_ON(!list_empty(&obj->gpu_write_list));
+	BUG_ON(!obj->active);
+	obj->ring = NULL;
+
+	i915_gem_object_move_off_active(obj);
+	obj->fenced_gpu_access = false;
+
+	obj->active = 0;
+	obj->pending_gpu_write = false;
+	drm_gem_object_unreference(&obj->base);
+
+	WARN_ON(i915_verify_lists(dev));
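Note: the newly added i915_gem_object_move_to_inactive() is the retire-side state transition: once the GPU is done with an object, it moves to either the pinned or the inactive list depending on pin_count, its active flag is cleared, and the reference the GPU held is dropped. A much-simplified standalone model of that bookkeeping (the types, fields, and list handling here are illustrative stand-ins, not the driver's own):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum obj_list { LIST_ACTIVE, LIST_FLUSHING, LIST_PINNED, LIST_INACTIVE };

struct gem_object {
    enum obj_list list;   /* which mm list the object is on */
    int  pin_count;       /* user pins keep the object resident */
    bool active;          /* GPU still references the object */
    int  refcount;
};

static void move_to_inactive(struct gem_object *obj)
{
    assert(obj->active);  /* mirrors BUG_ON(!obj->active) */

    obj->list = obj->pin_count != 0 ? LIST_PINNED : LIST_INACTIVE;
    obj->active = false;
    obj->refcount--;      /* mirrors drm_gem_object_unreference() */
}

int main(void)
{
    struct gem_object obj = { LIST_ACTIVE, 0, true, 2 };
    move_to_inactive(&obj);
    printf("list=%d active=%d refs=%d\n", obj.list, obj.active, obj.refcount);
    return 0;
}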
Line 904... Line 924...
 			obj->base.write_domain = 0;
 			list_del_init(&obj->gpu_write_list);
 			i915_gem_object_move_to_active(obj, ring,
 						       i915_gem_next_request_seqno(ring));
 
+			trace_i915_gem_object_change_domain(obj,
+							    obj->base.read_domains,
+							    old_write_domain);
 		}
 	}
+}
+
+int
+i915_add_request(struct intel_ring_buffer *ring,
+		 struct drm_file *file,
+		 struct drm_i915_gem_request *request)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	uint32_t seqno;
+	int was_empty;
+	int ret;
+
+	BUG_ON(request == NULL);
+
+	ret = ring->add_request(ring, &seqno);
+	if (ret)
+	    return ret;
+
+	trace_i915_gem_request_add(ring, seqno);
+
+	request->seqno = seqno;
+	request->ring = ring;
+	request->emitted_jiffies = jiffies;
+	was_empty = list_empty(&ring->request_list);
+	list_add_tail(&request->list, &ring->request_list);
+
+
+	ring->outstanding_lazy_request = false;
+
+//	if (!dev_priv->mm.suspended) {
+//		if (i915_enable_hangcheck) {
+//			mod_timer(&dev_priv->hangcheck_timer,
+//				  jiffies +
+//				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+//		}
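Note: i915_add_request(), enabled here, stamps a request with a fresh seqno obtained from the ring and queues it FIFO on ring->request_list so it can later be retired in order; the hangcheck timer arming stays commented out in this port. A standalone sketch of that bookkeeping (the singly linked queue below is a stand-in for the kernel's list_head machinery):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
    uint32_t seqno;
    struct request *next;
};

struct ring {
    uint32_t next_seqno;
    struct request *head, *tail;  /* FIFO: retire from head, add at tail */
};

static int add_request(struct ring *ring, struct request *rq)
{
    rq->seqno = ring->next_seqno++;  /* stand-in for ring->add_request() */
    rq->next = NULL;
    if (ring->tail)
        ring->tail->next = rq;
    else
        ring->head = rq;
    ring->tail = rq;
    return 0;
}

int main(void)
{
    struct ring ring = { 1, NULL, NULL };
    struct request *rq = calloc(1, sizeof(*rq));
    add_request(&ring, rq);
    printf("queued seqno %u\n", ring.head->seqno);
    free(rq);
    return 0;
}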
Line -... Line 982...
+
+
+
+
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+static void
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+{
+	uint32_t seqno;
+	int i;
+
+	if (list_empty(&ring->request_list))
+		return;
+
+	WARN_ON(i915_verify_lists(ring->dev));
+
+	seqno = ring->get_seqno(ring);
+
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
+		if (seqno >= ring->sync_seqno[i])
+			ring->sync_seqno[i] = 0;
+
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		if (!i915_seqno_passed(seqno, request->seqno))
+			break;
+
+		trace_i915_gem_request_retire(ring, request->seqno);
+
+		list_del(&request->list);
+		kfree(request);
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				      struct drm_i915_gem_object,
+				      ring_list);
+
+		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+			break;
+
+		if (obj->base.write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
+			i915_gem_object_move_to_inactive(obj);
+	}
+
+	if (unlikely(ring->trace_irq_seqno &&
+		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+		ring->irq_put(ring);
+		ring->trace_irq_seqno = 0;
+	}
+
+	WARN_ON(i915_verify_lists(ring->dev));
+}
+
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
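Note: the retire loop added here pops requests in FIFO order while the hardware's current seqno has passed them. The ordering test, i915_seqno_passed(), is a signed subtraction so it stays correct across 32-bit seqno wraparound; the standalone sketch below mirrors that comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors i915_seqno_passed(): true if hw_seqno has reached seqno */
static bool seqno_passed(uint32_t hw_seqno, uint32_t seqno)
{
    return (int32_t)(hw_seqno - seqno) >= 0;
}

int main(void)
{
    /* ordinary case */
    printf("%d\n", seqno_passed(100, 99));            /* 1: 99 completed */
    /* wraparound: hw_seqno wrapped past UINT32_MAX back to 5 */
    printf("%d\n", seqno_passed(5, UINT32_MAX - 3));  /* 1: still ordered */
    printf("%d\n", seqno_passed(5, 1000));            /* 0: not reached yet */
    return 0;
}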
Line 947... Line 1080...
 
+
+
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct intel_ring_buffer *ring,
+		  uint32_t seqno)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 ier;
+	int ret = 0;
+
+	BUG_ON(seqno == 0);
+
+//   if (atomic_read(&dev_priv->mm.wedged)) {
+//       struct completion *x = &dev_priv->error_completion;
+//       bool recovery_complete;
+//       unsigned long flags;
+
+		/* Give the error handler a chance to run. */
+//       spin_lock_irqsave(&x->wait.lock, flags);
+//       recovery_complete = x->done > 0;
+//       spin_unlock_irqrestore(&x->wait.lock, flags);
+//
+//       return recovery_complete ? -EIO : -EAGAIN;
+//   }
+
+	if (seqno == ring->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(ring, NULL, request);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
+		if (HAS_PCH_SPLIT(ring->dev))
+			ier = I915_READ(DEIER) | I915_READ(GTIER);
+		else
+			ier = I915_READ(IER);
+		if (!ier) {
+			DRM_ERROR("something (likely vbetool) disabled "
+				  "interrupts, re-enabling\n");
+//           ring->dev->driver->irq_preinstall(ring->dev);
+//           ring->dev->driver->irq_postinstall(ring->dev);
+		}
+
+		trace_i915_gem_request_wait_begin(ring, seqno);
+
+		ring->waiting_seqno = seqno;
+        if (ring->irq_get(ring)) {
+//            printf("enter wait\n");
+            wait_event(ring->irq_queue,
+                      i915_seqno_passed(ring->get_seqno(ring), seqno)
+                      || atomic_read(&dev_priv->mm.wedged));
+
+           ring->irq_put(ring);
+        } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+							     seqno) ||
+					   atomic_read(&dev_priv->mm.wedged), 3000))
+			ret = -EBUSY;
+		ring->waiting_seqno = 0;
+
+		trace_i915_gem_request_wait_end(ring, seqno);
+	}
+	if (atomic_read(&dev_priv->mm.wedged))
+		ret = -EAGAIN;
+
+	if (ret && ret != -ERESTARTSYS)
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, ring->get_seqno(ring),
+			  dev_priv->next_seqno);
+
+	/* Directly dispatch request retiring.  While we have the work queue
+	 * to handle this, the waiter on a request often wants an associated
+	 * buffer to have made it to the inactive list, and we would need
+	 * a separate wait queue to handle that.
+	 */
+	if (ret == 0)
+		i915_gem_retire_requests_ring(ring);
+
+	return ret;
+}
 
 /**
  * Ensures that all rendering to the object has completed and the object is
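Note: i915_wait_request(), also enabled in this revision, first flushes a lazy request if needed, then sleeps on the ring's irq queue until the seqno is signalled, falling back to a bounded poll (wait_for_atomic with a 3000 ms budget) when the interrupt cannot be armed, and finally retires requests directly. A standalone model of that control flow (poll_hw(), the irq_available flag, and the fake GPU progress are illustrative stand-ins for wait_event()/wait_for_atomic()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_seqno;                 /* advanced by the "GPU" */

static bool seqno_passed(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) >= 0;
}

static bool poll_hw(uint32_t seqno, int budget)
{
    while (budget--) {
        hw_seqno++;                       /* pretend the GPU makes progress */
        if (seqno_passed(hw_seqno, seqno))
            return true;
    }
    return false;
}

static int wait_request(uint32_t seqno, bool irq_available)
{
    if (seqno_passed(hw_seqno, seqno))
        return 0;                         /* already done, no wait needed */

    if (irq_available) {
        /* real driver: ring->irq_get() + wait_event(ring->irq_queue, ...) */
        while (!seqno_passed(hw_seqno, seqno))
            hw_seqno++;                   /* stand-in for sleeping on the irq */
        return 0;
    }

    /* real driver: wait_for_atomic(..., 3000), then ret = -EBUSY on timeout */
    return poll_hw(seqno, 3000) ? 0 : -1;
}

int main(void)
{
    printf("irq path: %d\n", wait_request(10, true));
    printf("poll path: %d\n", wait_request(20, false));
    return 0;
}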
Line 964... Line 1189...
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
 	 */
 	if (obj->active) {
-//		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
-//		if (ret)
-//			return ret;
+       ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+       if (ret)
+           return ret;
 	}
 
Line 1164... Line 1389...
 	}
 
 	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
-//           ret = i915_wait_request(obj->last_fenced_ring,
-//                       obj->last_fenced_seqno);
-//           if (ret)
-//               return ret;
+			ret = i915_wait_request(obj->last_fenced_ring,
+						obj->last_fenced_seqno);
+			if (ret)
+				return ret;
 		}
 
 		obj->last_fenced_seqno = 0;
Line 1599... Line 1824...
 					    old_write_domain);
 
 	return 0;
 }
 
-#if 0
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
Line 1659... Line 1883...
     }
 
 	obj->cache_level = cache_level;
 	return 0;
 }
-#endif
 
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
1773
	if (ret)
1996
	if (ret)
1774
		return ret;
1997
		return ret;
Line 1775... Line 1998...
1775
 
1998
 
Line 1776... Line -...
1776
	i915_gem_object_flush_gtt_write_domain(obj);
-
 
1777
 
-
 
1778
	/* If we have a partially-valid cache of the object in the CPU,
-
 
1779
	 * finish invalidating it and free the per-page flags.
-
 
Line 1780... Line 1999...
1780
	 */
1999
	i915_gem_object_flush_gtt_write_domain(obj);
1781
	i915_gem_object_set_to_full_cpu_read_domain(obj);
2000
 
Line 1782... Line 2001...
1782
 
2001
 
Line 1835... Line 2054...
 }
+
+
+
 
Line 1865... Line 2087...
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+	WARN_ON(i915_verify_lists(dev));
 
 #if 0
 	if (obj->gtt_space != NULL) {
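Note: the pin path gains a WARN_ON(i915_verify_lists(dev)) consistency check. The pattern: a verifier returns nonzero when a list invariant is broken, and the WARN_ON-style macro reports it without halting the system. A minimal userspace stand-in of that idiom (verify_lists() here is an illustrative dummy, not the driver's verifier):

#include <stdio.h>

#define WARN_ON(cond)                                        \
    do {                                                     \
        if (cond)                                            \
            fprintf(stderr, "WARN: %s:%d: %s\n",             \
                    __FILE__, __LINE__, #cond);              \
    } while (0)

static int verify_lists(int object_count, int listed_count)
{
    /* stand-in invariant: every object must be on exactly one list */
    return object_count != listed_count;
}

int main(void)
{
    WARN_ON(verify_lists(10, 10));  /* silent: lists are consistent */
    WARN_ON(verify_lists(10, 9));   /* prints a warning, keeps running */
    return 0;
}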