Subversion Repositories Kolibri OS

Rev 4539 → Rev 4560 (side-by-side diff: for each row, the old line number and text appear first, then the new line number and text; "-" marks a line that exists on only one side)
Line 63... Line 63...
63
 
63
 
64
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
64
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
65
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
65
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
66
						   bool force);
66
						   bool force);
-
 
67
static __must_check int
-
 
68
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-
 
69
			       bool readonly);
67
static __must_check int
70
static __must_check int
68
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
71
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
69
			   struct i915_address_space *vm,
72
			   struct i915_address_space *vm,
70
						    unsigned alignment,
73
						    unsigned alignment,
71
						    bool map_and_fenceable,
74
						    bool map_and_fenceable,
Line 79... Line 82...
79
				 struct drm_i915_gem_object *obj);
82
				 struct drm_i915_gem_object *obj);
80
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
83
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
81
					 struct drm_i915_fence_reg *fence,
84
					 struct drm_i915_fence_reg *fence,
82
					 bool enable);
85
					 bool enable);
Line 83... Line 86...
83
 
86
 
84
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
87
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
85
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
88
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
Line 86... Line 89...
86
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
89
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
87
 
90
 
88
static bool cpu_cache_is_coherent(struct drm_device *dev,
91
static bool cpu_cache_is_coherent(struct drm_device *dev,
Line 281... Line 284...
281
i915_gem_dumb_create(struct drm_file *file,
284
i915_gem_dumb_create(struct drm_file *file,
282
		     struct drm_device *dev,
285
		     struct drm_device *dev,
283
		     struct drm_mode_create_dumb *args)
286
		     struct drm_mode_create_dumb *args)
284
{
287
{
285
	/* have to work out size/pitch and return them */
288
	/* have to work out size/pitch and return them */
286
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
289
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
287
	args->size = args->pitch * args->height;
290
	args->size = args->pitch * args->height;
288
	return i915_gem_create(file, dev,
291
	return i915_gem_create(file, dev,
289
			       args->size, &args->handle);
292
			       args->size, &args->handle);
290
}
293
}
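
The only change in this hunk swaps the open-coded (args->bpp + 7) / 8 for the kernel's DIV_ROUND_UP() helper; the pitch is still rounded up to a 64-byte multiple by ALIGN(). A minimal standalone sketch of the arithmetic, assuming nothing beyond the two macros, which are reproduced here as the Linux kernel headers define them; the width/bpp sample values are invented:

#include <stdio.h>

/* Both helpers mirror the Linux kernel definitions (power-of-two alignment). */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366, bpp = 24;	/* invented sample values */

	/* Old form: open-coded round-up of bits per pixel to whole bytes. */
	unsigned int pitch_old = ALIGN(width * ((bpp + 7) / 8), 64);
	/* New form: the same value, spelled with DIV_ROUND_UP(). */
	unsigned int pitch_new = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);

	printf("old=%u new=%u\n", pitch_old, pitch_new);	/* both print 4160 */
	return 0;
}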
Line 458... Line 461...
458
		/* If we're not in the cpu read domain, set ourself into the gtt
461
		/* If we're not in the cpu read domain, set ourself into the gtt
459
		 * read domain and manually flush cachelines (if required). This
462
		 * read domain and manually flush cachelines (if required). This
460
		 * optimizes for the case when the gpu will dirty the data
463
		 * optimizes for the case when the gpu will dirty the data
461
		 * anyway again before the next pread happens. */
464
		 * anyway again before the next pread happens. */
462
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
465
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
463
		if (i915_gem_obj_bound_any(obj)) {
-
 
464
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
466
		ret = i915_gem_object_wait_rendering(obj, true);
465
			if (ret)
467
			if (ret)
466
				return ret;
468
				return ret;
467
		}
469
		}
468
	}
-
 
Line 469... Line 470...
469
 
470
 
470
	ret = i915_gem_object_get_pages(obj);
471
	ret = i915_gem_object_get_pages(obj);
471
	if (ret)
472
	if (ret)
Line 773... Line 774...
773
		/* If we're not in the cpu write domain, set ourself into the gtt
774
		/* If we're not in the cpu write domain, set ourself into the gtt
774
		 * write domain and manually flush cachelines (if required). This
775
		 * write domain and manually flush cachelines (if required). This
775
		 * optimizes for the case when the gpu will use the data
776
		 * optimizes for the case when the gpu will use the data
776
		 * right away and we therefore have to clflush anyway. */
777
		 * right away and we therefore have to clflush anyway. */
777
		needs_clflush_after = cpu_write_needs_clflush(obj);
778
		needs_clflush_after = cpu_write_needs_clflush(obj);
778
		if (i915_gem_obj_bound_any(obj)) {
-
 
779
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
779
		ret = i915_gem_object_wait_rendering(obj, false);
780
			if (ret)
780
			if (ret)
781
				return ret;
781
				return ret;
782
		}
782
		}
783
	}
-
 
784
	/* Same trick applies to invalidate partially written cachelines read
783
	/* Same trick applies to invalidate partially written cachelines read
785
	 * before writing. */
784
	 * before writing. */
786
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
785
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
787
		needs_clflush_before =
786
		needs_clflush_before =
788
			!cpu_cache_is_coherent(dev, obj->cache_level);
787
			!cpu_cache_is_coherent(dev, obj->cache_level);
Line 980... Line 979...
980
	int ret;
979
	int ret;
Line 981... Line 980...
981
 
980
 
Line 982... Line 981...
982
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
981
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
983
 
982
 
984
	ret = 0;
983
	ret = 0;
Line 985... Line 984...
985
	if (seqno == ring->outstanding_lazy_request)
984
	if (seqno == ring->outstanding_lazy_seqno)
986
		ret = i915_add_request(ring, NULL);
985
		ret = i915_add_request(ring, NULL);
Line -... Line 986...
-
 
986
 
-
 
987
	return ret;
-
 
988
}
-
 
989
 
-
 
990
static void fake_irq(unsigned long data)
-
 
991
{
-
 
992
//	wake_up_process((struct task_struct *)data);
-
 
993
}
-
 
994
 
-
 
995
static bool missed_irq(struct drm_i915_private *dev_priv,
-
 
996
		       struct intel_ring_buffer *ring)
-
 
997
{
-
 
998
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
-
 
999
}
-
 
1000
 
-
 
1001
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
-
 
1002
{
-
 
1003
	if (file_priv == NULL)
-
 
1004
		return true;
987
 
1005
 
988
	return ret;
1006
	return !atomic_xchg(&file_priv->rps_wait_boost, true);
989
}
1007
}
990
 
1008
 
991
/**
1009
/**
Line 1006... Line 1024...
1006
 * Returns 0 if the seqno was found within the alloted time. Else returns the
1024
 * Returns 0 if the seqno was found within the alloted time. Else returns the
1007
 * errno with remaining time filled in timeout argument.
1025
 * errno with remaining time filled in timeout argument.
1008
 */
1026
 */
1009
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1027
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1010
			unsigned reset_counter,
1028
			unsigned reset_counter,
-
 
1029
			bool interruptible,
1011
			bool interruptible, struct timespec *timeout)
1030
			struct timespec *timeout,
-
 
1031
			struct drm_i915_file_private *file_priv)
1012
{
1032
{
1013
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
1033
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-
 
1034
	const bool irq_test_in_progress =
-
 
1035
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1014
	struct timespec before, now, wait_time={1,0};
1036
	struct timespec before, now;
1015
	unsigned long timeout_jiffies;
1037
    unsigned long timeout_expire, wait_time;
1016
	long end;
-
 
1017
	bool wait_forever = true;
1038
    wait_queue_t __wait;
1018
	int ret;
1039
	int ret;
Line 1019... Line 1040...
1019
 
1040
 
Line 1020... Line 1041...
1020
	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1041
	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1021
 
1042
 
Line -... Line 1043...
-
 
1043
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1022
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1044
		return 0;
Line -... Line 1045...
-
 
1045
 
-
 
1046
    timeout_expire = timeout ? GetTimerTicks() + timespec_to_jiffies_timeout(timeout) : 0;
1023
		return 0;
1047
    wait_time = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
-
 
1048
 
1024
 
1049
	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
1025
	trace_i915_gem_request_wait_begin(ring, seqno);
1050
		gen6_rps_boost(dev_priv);
1026
 
1051
		if (file_priv)
Line 1027... Line -...
1027
	if (timeout != NULL) {
-
 
1028
		wait_time = *timeout;
-
 
1029
		wait_forever = false;
1052
			mod_delayed_work(dev_priv->wq,
1030
	}
1053
					 &file_priv->mm.idle_work,
Line 1031... Line 1054...
1031
 
1054
					 msecs_to_jiffies(100));
1032
	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1055
	}
Line 1033... Line -...
1033
 
-
 
1034
	if (WARN_ON(!ring->irq_get(ring)))
1056
 
1035
		return -ENODEV;
1057
	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1036
 
-
 
1037
    /* Record current time in case interrupted by signal, or wedged * */
1058
		return -ENODEV;
1038
	getrawmonotonic(&before);
1059
 
1039
 
-
 
1040
#define EXIT_COND \
-
 
1041
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1060
    INIT_LIST_HEAD(&__wait.task_list);
1042
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
-
 
1043
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-
 
1044
	do {
-
 
Line 1045... Line 1061...
1045
		if (interruptible)
1061
    __wait.evnt = CreateEvent(NULL, MANUAL_DESTROY);
1046
			end = wait_event_interruptible_timeout(ring->irq_queue,
1062
 
1047
							       EXIT_COND,
1063
	/* Record current time in case interrupted by signal, or wedged */
1048
							       timeout_jiffies);
-
 
1049
		else
-
 
1050
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1064
	trace_i915_gem_request_wait_begin(ring, seqno);
1051
						 timeout_jiffies);
1065
 
1052
 
1066
	for (;;) {
1053
		/* We need to check whether any gpu reset happened in between
1067
        unsigned long flags;
1054
		 * the caller grabbing the seqno and now ... */
1068
 
1055
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1069
		/* We need to check whether any gpu reset happened in between
1056
			end = -EAGAIN;
1070
		 * the caller grabbing the seqno and now ... */
1057
 
-
 
Line 1058... Line -...
1058
		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-
 
1059
		 * gone. */
1071
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1060
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1072
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
-
 
1073
			 * is truely gone. */
-
 
1074
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
Line 1061... Line -...
1061
		if (ret)
-
 
1062
			end = ret;
1075
			if (ret == 0)
1063
	} while (end == 0 && wait_forever);
1076
				ret = -EAGAIN;
-
 
1077
			break;
1064
 
1078
		}
Line 1065... Line 1079...
1065
	getrawmonotonic(&now);
1079
 
1066
 
1080
		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1067
	ring->irq_put(ring);
1081
			ret = 0;
1068
	trace_i915_gem_request_wait_end(ring, seqno);
1082
			break;
-
 
1083
		}
1069
#undef EXIT_COND
1084
 
-
 
1085
        if (timeout && time_after_eq(GetTimerTicks(), timeout_expire)) {
1070
 
1086
			ret = -ETIME;
1071
	if (timeout) {
1087
			break;
1072
//		struct timespec sleep_time = timespec_sub(now, before);
1088
		}
1073
//		*timeout = timespec_sub(*timeout, sleep_time);
1089
 
1074
	}
-
 
1075
 
1090
        spin_lock_irqsave(&ring->irq_queue.lock, flags);
-
 
1091
        if (list_empty(&__wait.task_list))
-
 
1092
            __add_wait_queue(&ring->irq_queue, &__wait);
-
 
1093
        spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
-
 
1094
 
-
 
1095
        WaitEventTimeout(__wait.evnt, 1);
-
 
1096
 
-
 
1097
        if (!list_empty(&__wait.task_list)) {
-
 
1098
            spin_lock_irqsave(&ring->irq_queue.lock, flags);
-
 
1099
            list_del_init(&__wait.task_list);
1076
	switch (end) {
1100
            spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
Line 1077... Line 1101...
1077
	case -EIO:
1101
        }
1078
	case -EAGAIN: /* Wedged */
1102
    };
1079
	case -ERESTARTSYS: /* Signal */
1103
    trace_i915_gem_request_wait_end(ring, seqno);
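
In the rewritten wait loop above, the jiffies countdown that used to be handed to wait_event_interruptible_timeout() is replaced by an absolute deadline: timeout_expire is computed once up front, and the loop gives up with -ETIME once time_after_eq(GetTimerTicks(), timeout_expire) holds (the KolibriOS port uses GetTimerTicks() where upstream uses jiffies). time_after_eq() stays correct even if the tick counter wraps, because it compares the signed difference of the two values. A minimal standalone sketch, with the macro body mirroring the kernel definition (minus its typecheck() wrappers) and invented tick values:

#include <stdio.h>

/* Mirrors the kernel's time_after_eq() (without its typecheck() wrappers):
 * true if a is at or after b, even when the counter has wrapped around. */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long now = (unsigned long)-5;	/* tick counter about to wrap */
	unsigned long deadline = now + 10;	/* deadline lands past the wrap */

	printf("%d\n", time_after_eq(now, deadline));      /* 0: not yet expired */
	printf("%d\n", time_after_eq(now + 20, deadline)); /* 1: deadline passed */
	return 0;
}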
Line 1109... Line 1133...
1109
	if (ret)
1133
	if (ret)
1110
		return ret;
1134
		return ret;
Line 1111... Line 1135...
1111
 
1135
 
1112
	return __wait_seqno(ring, seqno,
1136
	return __wait_seqno(ring, seqno,
1113
			    atomic_read(&dev_priv->gpu_error.reset_counter),
1137
			    atomic_read(&dev_priv->gpu_error.reset_counter),
1114
			    interruptible, NULL);
1138
			    interruptible, NULL, NULL);
Line 1115... Line 1139...
1115
}
1139
}
1116
 
1140
 
1117
static int
1141
static int
Line 1159... Line 1183...
1159
/* A nonblocking variant of the above wait. This is a highly dangerous routine
1183
/* A nonblocking variant of the above wait. This is a highly dangerous routine
1160
 * as the object state may change during this call.
1184
 * as the object state may change during this call.
1161
 */
1185
 */
1162
static __must_check int
1186
static __must_check int
1163
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1187
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-
 
1188
					    struct drm_file *file,
1164
					    bool readonly)
1189
					    bool readonly)
1165
{
1190
{
1166
	struct drm_device *dev = obj->base.dev;
1191
	struct drm_device *dev = obj->base.dev;
1167
	struct drm_i915_private *dev_priv = dev->dev_private;
1192
	struct drm_i915_private *dev_priv = dev->dev_private;
1168
	struct intel_ring_buffer *ring = obj->ring;
1193
	struct intel_ring_buffer *ring = obj->ring;
Line 1185... Line 1210...
1185
	if (ret)
1210
	if (ret)
1186
		return ret;
1211
		return ret;
Line 1187... Line 1212...
1187
 
1212
 
1188
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1213
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1189
	mutex_unlock(&dev->struct_mutex);
1214
	mutex_unlock(&dev->struct_mutex);
1190
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1215
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
1191
	mutex_lock(&dev->struct_mutex);
1216
	mutex_lock(&dev->struct_mutex);
1192
	if (ret)
1217
	if (ret)
Line 1193... Line 1218...
1193
		return ret;
1218
		return ret;
Line 1234... Line 1259...
1234
 
1259
 
1235
	/* Try to flush the object off the GPU without holding the lock.
1260
	/* Try to flush the object off the GPU without holding the lock.
1236
	 * We will repeat the flush holding the lock in the normal manner
1261
	 * We will repeat the flush holding the lock in the normal manner
1237
	 * to catch cases where we are gazumped.
1262
	 * to catch cases where we are gazumped.
1238
	 */
1263
	 */
1239
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1264
	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
1240
	if (ret)
1265
	if (ret)
Line 1241... Line 1266...
1241
		goto unref;
1266
		goto unref;
1242
 
1267
 
Line 1749... Line 1774...
1749
				       &dev_priv->mm.fence_list);
1774
				       &dev_priv->mm.fence_list);
1750
		}
1775
		}
1751
	}
1776
	}
1752
}
1777
}
Line -... Line 1778...
-
 
1778
 
-
 
1779
void i915_vma_move_to_active(struct i915_vma *vma,
-
 
1780
			     struct intel_ring_buffer *ring)
-
 
1781
{
-
 
1782
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
-
 
1783
	return i915_gem_object_move_to_active(vma->obj, ring);
-
 
1784
}
1753
 
1785
 
1754
static void
1786
static void
1755
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1787
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1756
{
1788
{
1757
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1789
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Line 1870... Line 1902...
1870
	 */
1902
	 */
1871
   ret = intel_ring_flush_all_caches(ring);
1903
   ret = intel_ring_flush_all_caches(ring);
1872
   if (ret)
1904
   if (ret)
1873
       return ret;
1905
       return ret;
Line 1874... Line 1906...
1874
 
1906
 
1875
	request = kmalloc(sizeof(*request), GFP_KERNEL);
1907
	request = ring->preallocated_lazy_request;
1876
	if (request == NULL)
1908
	if (WARN_ON(request == NULL))
Line 1877... Line -...
1877
		return -ENOMEM;
-
 
1878
 
1909
		return -ENOMEM;
1879
 
1910
 
1880
	/* Record the position of the start of the request so that
1911
	/* Record the position of the start of the request so that
1881
	 * should we detect the updated seqno part-way through the
1912
	 * should we detect the updated seqno part-way through the
1882
    * GPU processing the request, we never over-estimate the
1913
    * GPU processing the request, we never over-estimate the
1883
	 * position of the head.
1914
	 * position of the head.
Line 1884... Line 1915...
1884
	 */
1915
	 */
1885
   request_ring_position = intel_ring_get_tail(ring);
1916
   request_ring_position = intel_ring_get_tail(ring);
1886
 
-
 
1887
	ret = ring->add_request(ring);
1917
 
1888
	if (ret) {
-
 
Line 1889... Line 1918...
1889
		kfree(request);
1918
	ret = ring->add_request(ring);
1890
		return ret;
1919
	if (ret)
1891
	}
1920
		return ret;
1892
 
1921
 
1893
	request->seqno = intel_ring_get_seqno(ring);
-
 
1894
	request->ring = ring;
-
 
Line 1895... Line 1922...
1895
	request->head = request_start;
1922
	request->seqno = intel_ring_get_seqno(ring);
1896
	request->tail = request_ring_position;
1923
	request->ring = ring;
1897
	request->ctx = ring->last_context;
1924
	request->head = request_start;
1898
	request->batch_obj = obj;
1925
	request->tail = request_ring_position;
1899
 
1926
 
1900
	/* Whilst this request exists, batch_obj will be on the
1927
	/* Whilst this request exists, batch_obj will be on the
-
 
1928
	 * active_list, and so will hold the active reference. Only when this
Line -... Line 1929...
-
 
1929
	 * request is retired will the the batch_obj be moved onto the
-
 
1930
	 * inactive_list and lose its active reference. Hence we do not need
-
 
1931
	 * to explicitly hold another reference here.
-
 
1932
	 */
1901
	 * active_list, and so will hold the active reference. Only when this
1933
	request->batch_obj = obj;
1902
	 * request is retired will the the batch_obj be moved onto the
1934
 
Line 1903... Line 1935...
1903
	 * inactive_list and lose its active reference. Hence we do not need
1935
	/* Hold a reference to the current context so that we can inspect
1904
	 * to explicitly hold another reference here.
1936
	 * it later in case a hangcheck error event fires.
Line 1921... Line 1953...
1921
			      &file_priv->mm.request_list);
1953
			      &file_priv->mm.request_list);
1922
		spin_unlock(&file_priv->mm.lock);
1954
		spin_unlock(&file_priv->mm.lock);
1923
	}
1955
	}
Line 1924... Line 1956...
1924
 
1956
 
1925
	trace_i915_gem_request_add(ring, request->seqno);
1957
	trace_i915_gem_request_add(ring, request->seqno);
-
 
1958
	ring->outstanding_lazy_seqno = 0;
Line 1926... Line 1959...
1926
	ring->outstanding_lazy_request = 0;
1959
	ring->preallocated_lazy_request = NULL;
1927
 
1960
 
Line 1928... Line 1961...
1928
	if (!dev_priv->ums.mm_suspended) {
1961
	if (!dev_priv->ums.mm_suspended) {
Line 1948... Line 1981...
1948
 
1981
 
1949
	if (!file_priv)
1982
	if (!file_priv)
Line 1950... Line 1983...
1950
		return;
1983
		return;
1951
 
-
 
1952
	spin_lock(&file_priv->mm.lock);
1984
 
1953
	if (request->file_priv) {
1985
	spin_lock(&file_priv->mm.lock);
1954
		list_del(&request->client_list);
-
 
1955
		request->file_priv = NULL;
1986
		list_del(&request->client_list);
1956
	}
1987
		request->file_priv = NULL;
Line 1957... Line 1988...
1957
	spin_unlock(&file_priv->mm.lock);
1988
	spin_unlock(&file_priv->mm.lock);
1958
}
1989
}
Line 2016... Line 2047...
2016
	}
2047
	}
Line 2017... Line 2048...
2017
 
2048
 
2018
	return false;
2049
	return false;
Line -... Line 2050...
-
 
2050
}
-
 
2051
 
-
 
2052
static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
-
 
2053
{
-
 
2054
    const unsigned long elapsed = GetTimerTicks()/100 - hs->guilty_ts;
-
 
2055
 
-
 
2056
	if (hs->banned)
-
 
2057
		return true;
-
 
2058
 
-
 
2059
	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
-
 
2060
		DRM_ERROR("context hanging too fast, declaring banned!\n");
-
 
2061
		return true;
-
 
2062
	}
-
 
2063
 
-
 
2064
	return false;
2019
}
2065
}
2020
 
2066
 
2021
static void i915_set_reset_status(struct intel_ring_buffer *ring,
2067
static void i915_set_reset_status(struct intel_ring_buffer *ring,
2022
				  struct drm_i915_gem_request *request,
2068
				  struct drm_i915_gem_request *request,
2023
				  u32 acthd)
2069
				  u32 acthd)
Line 2033... Line 2079...
2033
		offset = i915_gem_obj_offset(request->batch_obj,
2079
		offset = i915_gem_obj_offset(request->batch_obj,
2034
					     request_to_vm(request));
2080
					     request_to_vm(request));
Line 2035... Line 2081...
2035
 
2081
 
2036
	if (ring->hangcheck.action != HANGCHECK_WAIT &&
2082
	if (ring->hangcheck.action != HANGCHECK_WAIT &&
2037
	    i915_request_guilty(request, acthd, &inside)) {
2083
	    i915_request_guilty(request, acthd, &inside)) {
2038
		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2084
		DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2039
			  ring->name,
2085
			  ring->name,
2040
			  inside ? "inside" : "flushing",
2086
			  inside ? "inside" : "flushing",
2041
			  offset,
2087
			  offset,
2042
			  request->ctx ? request->ctx->id : 0,
2088
			  request->ctx ? request->ctx->id : 0,
Line 2052... Line 2098...
2052
		hs = &request->ctx->hang_stats;
2098
		hs = &request->ctx->hang_stats;
2053
	else if (request->file_priv)
2099
	else if (request->file_priv)
2054
		hs = &request->file_priv->hang_stats;
2100
		hs = &request->file_priv->hang_stats;
Line 2055... Line 2101...
2055
 
2101
 
2056
	if (hs) {
2102
	if (hs) {
-
 
2103
		if (guilty) {
2057
		if (guilty)
2104
			hs->banned = i915_context_is_banned(hs);
-
 
2105
			hs->batch_active++;
2058
			hs->batch_active++;
2106
            hs->guilty_ts = GetTimerTicks()/100;
2059
		else
2107
		} else {
2060
			hs->batch_pending++;
2108
			hs->batch_pending++;
2061
	}
2109
	}
-
 
2110
	}
Line 2062... Line 2111...
2062
}
2111
}
2063
 
2112
 
2064
static void i915_gem_free_request(struct drm_i915_gem_request *request)
2113
static void i915_gem_free_request(struct drm_i915_gem_request *request)
2065
{
2114
{
Line 2088... Line 2137...
2088
}
2137
}
Line 2089... Line 2138...
2089
 
2138
 
2090
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2139
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2091
					struct intel_ring_buffer *ring)
2140
					struct intel_ring_buffer *ring)
2092
{
-
 
2093
	while (!list_empty(&ring->request_list)) {
-
 
2094
		struct drm_i915_gem_request *request;
-
 
2095
 
-
 
2096
		request = list_first_entry(&ring->request_list,
-
 
2097
					   struct drm_i915_gem_request,
-
 
2098
					   list);
-
 
2099
 
-
 
2100
		i915_gem_free_request(request);
-
 
2101
	}
-
 
2102
 
2141
{
2103
	while (!list_empty(&ring->active_list)) {
2142
	while (!list_empty(&ring->active_list)) {
Line 2104... Line 2143...
2104
		struct drm_i915_gem_object *obj;
2143
		struct drm_i915_gem_object *obj;
2105
 
2144
 
2106
		obj = list_first_entry(&ring->active_list,
2145
		obj = list_first_entry(&ring->active_list,
Line 2107... Line 2146...
2107
				       struct drm_i915_gem_object,
2146
				       struct drm_i915_gem_object,
2108
				       ring_list);
2147
				       ring_list);
-
 
2148
 
-
 
2149
		i915_gem_object_move_to_inactive(obj);
-
 
2150
	}
-
 
2151
 
-
 
2152
	/*
-
 
2153
	 * We must free the requests after all the corresponding objects have
-
 
2154
	 * been moved off active lists. Which is the same order as the normal
-
 
2155
	 * retire_requests function does. This is important if object hold
-
 
2156
	 * implicit references on things like e.g. ppgtt address spaces through
-
 
2157
	 * the request.
-
 
2158
	 */
-
 
2159
	while (!list_empty(&ring->request_list)) {
-
 
2160
		struct drm_i915_gem_request *request;
-
 
2161
 
-
 
2162
		request = list_first_entry(&ring->request_list,
-
 
2163
					   struct drm_i915_gem_request,
-
 
2164
					   list);
2109
 
2165
 
Line 2110... Line 2166...
2110
		i915_gem_object_move_to_inactive(obj);
2166
		i915_gem_free_request(request);
2111
	}
2167
	}
2112
}
2168
}
Line 2147... Line 2203...
2147
		i915_gem_reset_ring_status(dev_priv, ring);
2203
		i915_gem_reset_ring_status(dev_priv, ring);
Line 2148... Line 2204...
2148
 
2204
 
2149
	for_each_ring(ring, dev_priv, i)
2205
	for_each_ring(ring, dev_priv, i)
Line -... Line 2206...
-
 
2206
		i915_gem_reset_ring_cleanup(dev_priv, ring);
-
 
2207
 
2150
		i915_gem_reset_ring_cleanup(dev_priv, ring);
2208
	i915_gem_cleanup_ringbuffer(dev);
2151
 
2209
 
Line 2152... Line 2210...
2152
	i915_gem_restore_fences(dev);
2210
	i915_gem_restore_fences(dev);
2153
}
2211
}
Line 2211... Line 2269...
2211
	}
2269
	}
Line 2212... Line 2270...
2212
 
2270
 
2213
	WARN_ON(i915_verify_lists(ring->dev));
2271
	WARN_ON(i915_verify_lists(ring->dev));
Line 2214... Line 2272...
2214
}
2272
}
2215
 
2273
 
2216
void
2274
bool
2217
i915_gem_retire_requests(struct drm_device *dev)
2275
i915_gem_retire_requests(struct drm_device *dev)
2218
{
2276
{
-
 
2277
	drm_i915_private_t *dev_priv = dev->dev_private;
2219
	drm_i915_private_t *dev_priv = dev->dev_private;
2278
	struct intel_ring_buffer *ring;
Line 2220... Line 2279...
2220
	struct intel_ring_buffer *ring;
2279
	bool idle = true;
2221
	int i;
2280
	int i;
-
 
2281
 
-
 
2282
	for_each_ring(ring, dev_priv, i) {
-
 
2283
		i915_gem_retire_requests_ring(ring);
-
 
2284
		idle &= list_empty(&ring->request_list);
-
 
2285
	}
-
 
2286
 
-
 
2287
	if (idle)
-
 
2288
		mod_delayed_work(dev_priv->wq,
-
 
2289
				   &dev_priv->mm.idle_work,
2222
 
2290
				   msecs_to_jiffies(100));
Line 2223... Line 2291...
2223
	for_each_ring(ring, dev_priv, i)
2291
 
2224
		i915_gem_retire_requests_ring(ring);
2292
	return idle;
2225
}
2293
}
2226
 
2294
 
2227
static void
2295
static void
2228
i915_gem_retire_work_handler(struct work_struct *work)
2296
i915_gem_retire_work_handler(struct work_struct *work)
2229
{
2297
{
2230
	drm_i915_private_t *dev_priv;
-
 
2231
	struct drm_device *dev;
-
 
2232
	struct intel_ring_buffer *ring;
-
 
2233
	bool idle;
-
 
2234
	int i;
-
 
Line 2235... Line 2298...
2235
 
2298
	struct drm_i915_private *dev_priv =
-
 
2299
		container_of(work, typeof(*dev_priv), mm.retire_work.work);
2236
	dev_priv = container_of(work, drm_i915_private_t,
2300
	struct drm_device *dev = dev_priv->dev;
-
 
2301
	bool idle;
-
 
2302
 
-
 
2303
	/* Come back later if the device is busy... */
-
 
2304
	idle = false;
2237
				mm.retire_work.work);
2305
	if (mutex_trylock(&dev->struct_mutex)) {
2238
	dev = dev_priv->dev;
2306
		idle = i915_gem_retire_requests(dev);
2239
 
-
 
2240
	/* Come back later if the device is busy... */
-
 
2241
	if (!mutex_trylock(&dev->struct_mutex)) {
-
 
2242
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-
 
2243
				   round_jiffies_up_relative(HZ));
-
 
2244
        return;
-
 
2245
	}
-
 
2246
 
-
 
2247
	i915_gem_retire_requests(dev);
-
 
2248
 
-
 
2249
	/* Send a periodic flush down the ring so we don't hold onto GEM
-
 
2250
	 * objects indefinitely.
-
 
2251
	 */
-
 
2252
	idle = true;
-
 
2253
	for_each_ring(ring, dev_priv, i) {
2307
		mutex_unlock(&dev->struct_mutex);
Line 2254... Line 2308...
2254
		if (ring->gpu_caches_dirty)
2308
	}
2255
			i915_add_request(ring, NULL);
2309
	if (!idle)
-
 
2310
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2256
 
2311
				   round_jiffies_up_relative(HZ));
2257
		idle &= list_empty(&ring->request_list);
-
 
2258
	}
2312
}
Line 2259... Line 2313...
2259
 
2313
 
2260
	if (!dev_priv->ums.mm_suspended && !idle)
2314
static void
Line 2261... Line 2315...
2261
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2315
i915_gem_idle_work_handler(struct work_struct *work)
2262
				   round_jiffies_up_relative(HZ));
2316
{
2263
	if (idle)
2317
	struct drm_i915_private *dev_priv =
Line 2359... Line 2413...
2359
 
2413
 
2360
	drm_gem_object_unreference(&obj->base);
2414
	drm_gem_object_unreference(&obj->base);
2361
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2415
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Line 2362... Line 2416...
2362
	mutex_unlock(&dev->struct_mutex);
2416
	mutex_unlock(&dev->struct_mutex);
2363
 
2417
 
2364
	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2418
	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2365
	if (timeout)
2419
	if (timeout)
Line 2366... Line 2420...
2366
		args->timeout_ns = timespec_to_ns(timeout);
2420
		args->timeout_ns = timespec_to_ns(timeout);
Line 2406... Line 2460...
2406
 
2460
 
2407
	ret = i915_gem_check_olr(obj->ring, seqno);
2461
	ret = i915_gem_check_olr(obj->ring, seqno);
2408
	if (ret)
2462
	if (ret)
Line -... Line 2463...
-
 
2463
		return ret;
2409
		return ret;
2464
 
2410
 
2465
	trace_i915_gem_ring_sync_to(from, to, seqno);
2411
	ret = to->sync_to(to, from, seqno);
2466
	ret = to->sync_to(to, from, seqno);
2412
	if (!ret)
2467
	if (!ret)
2413
		/* We use last_read_seqno because sync_to()
2468
		/* We use last_read_seqno because sync_to()
Line 2453... Line 2508...
2453
        return 0;
2508
        return 0;
Line 2454... Line 2509...
2454
 
2509
 
2455
	if (list_empty(&vma->vma_link))
2510
	if (list_empty(&vma->vma_link))
Line 2456... Line 2511...
2456
		return 0;
2511
		return 0;
2457
 
2512
 
-
 
2513
	if (!drm_mm_node_allocated(&vma->node)) {
-
 
2514
		i915_gem_vma_destroy(vma);
-
 
2515
 
Line 2458... Line 2516...
2458
	if (!drm_mm_node_allocated(&vma->node))
2516
		return 0;
2459
		goto destroy;
2517
	}
Line 2460... Line 2518...
2460
 
2518
 
Line 2485... Line 2543...
2485
	if (obj->has_aliasing_ppgtt_mapping) {
2543
	if (obj->has_aliasing_ppgtt_mapping) {
2486
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2544
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2487
		obj->has_aliasing_ppgtt_mapping = 0;
2545
		obj->has_aliasing_ppgtt_mapping = 0;
2488
	}
2546
	}
2489
	i915_gem_gtt_finish_object(obj);
2547
	i915_gem_gtt_finish_object(obj);
2490
	i915_gem_object_unpin_pages(obj);
-
 
Line 2491... Line 2548...
2491
 
2548
 
2492
	list_del(&vma->mm_list);
2549
	list_del(&vma->mm_list);
2493
	/* Avoid an unnecessary call to unbind on rebind. */
2550
	/* Avoid an unnecessary call to unbind on rebind. */
2494
	if (i915_is_ggtt(vma->vm))
2551
	if (i915_is_ggtt(vma->vm))
Line 2495... Line 2552...
2495
	obj->map_and_fenceable = true;
2552
	obj->map_and_fenceable = true;
2496
 
-
 
2497
	drm_mm_remove_node(&vma->node);
-
 
2498
 
2553
 
Line 2499... Line 2554...
2499
destroy:
2554
	drm_mm_remove_node(&vma->node);
2500
	i915_gem_vma_destroy(vma);
2555
	i915_gem_vma_destroy(vma);
2501
 
-
 
2502
	/* Since the unbound list is global, only move to that list if
-
 
2503
	 * no more VMAs exist.
2556
 
2504
	 * NB: Until we have real VMAs there will only ever be one */
2557
	/* Since the unbound list is global, only move to that list if
Line -... Line 2558...
-
 
2558
	 * no more VMAs exist. */
-
 
2559
	if (list_empty(&obj->vma_list))
-
 
2560
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-
 
2561
 
-
 
2562
	/* And finally now the object is completely decoupled from this vma,
-
 
2563
	 * we can drop its hold on the backing storage and allow it to be
2505
	WARN_ON(!list_empty(&obj->vma_list));
2564
	 * reaped by the shrinker.
2506
	if (list_empty(&obj->vma_list))
2565
	 */
Line 2507... Line 2566...
2507
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2566
	i915_gem_object_unpin_pages(obj);
2508
 
2567
 
Line 2696... Line 2755...
2696
	WARN(obj && (!obj->stride || !obj->tiling_mode),
2755
	WARN(obj && (!obj->stride || !obj->tiling_mode),
2697
	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2756
	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2698
	     obj->stride, obj->tiling_mode);
2757
	     obj->stride, obj->tiling_mode);
Line 2699... Line 2758...
2699
 
2758
 
-
 
2759
	switch (INTEL_INFO(dev)->gen) {
2700
	switch (INTEL_INFO(dev)->gen) {
2760
	case 8:
2701
	case 7:
2761
	case 7:
2702
	case 6:
2762
	case 6:
2703
	case 5:
2763
	case 5:
2704
	case 4: i965_write_fence_reg(dev, reg, obj); break;
2764
	case 4: i965_write_fence_reg(dev, reg, obj); break;
Line 2795... Line 2855...
2795
		if (!reg->pin_count)
2855
		if (!reg->pin_count)
2796
			avail = reg;
2856
			avail = reg;
2797
	}
2857
	}
Line 2798... Line 2858...
2798
 
2858
 
2799
	if (avail == NULL)
2859
	if (avail == NULL)
Line 2800... Line 2860...
2800
		return NULL;
2860
		goto deadlock;
2801
 
2861
 
2802
	/* None available, try to steal one or wait for a user to finish */
2862
	/* None available, try to steal one or wait for a user to finish */
2803
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2863
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Line 2804... Line 2864...
2804
		if (reg->pin_count)
2864
		if (reg->pin_count)
2805
			continue;
2865
			continue;
Line -... Line 2866...
-
 
2866
 
-
 
2867
		return reg;
-
 
2868
	}
-
 
2869
 
-
 
2870
deadlock:
2806
 
2871
	/* Wait for completion of pending flips which consume fences */
2807
		return reg;
2872
//   if (intel_has_pending_fb_unpin(dev))
Line 2808... Line 2873...
2808
	}
2873
//       return ERR_PTR(-EAGAIN);
2809
 
2874
 
2810
	return NULL;
2875
	return ERR_PTR(-EDEADLK);
Line 2850... Line 2915...
2850
				       &dev_priv->mm.fence_list);
2915
				       &dev_priv->mm.fence_list);
2851
			return 0;
2916
			return 0;
2852
		}
2917
		}
2853
	} else if (enable) {
2918
	} else if (enable) {
2854
		reg = i915_find_fence_reg(dev);
2919
		reg = i915_find_fence_reg(dev);
2855
		if (reg == NULL)
2920
		if (IS_ERR(reg))
2856
			return -EDEADLK;
2921
			return PTR_ERR(reg);
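
The pair of changes above moves i915_find_fence_reg() and its caller from the NULL convention to encoded error pointers: the function now reports why it failed (ERR_PTR(-EDEADLK)), and the caller tests IS_ERR() and propagates PTR_ERR() instead of hard-coding -EDEADLK. A minimal standalone sketch of that convention follows; the three macros mirror <linux/err.h>, and find_slot() is an invented stand-in, not the driver function itself:

#include <stdio.h>
#include <errno.h>

/* These three mirror <linux/err.h>; find_slot() is an invented stand-in. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int slots_free;				/* pretend resource pool */

static void *find_slot(void)
{
	if (!slots_free)
		return ERR_PTR(-EDEADLK);	/* say why it failed, not just NULL */
	return &slots_free;			/* any valid pointer stands in for a slot */
}

int main(void)
{
	void *slot = find_slot();

	if (IS_ERR(slot)) {			/* replaces the old "== NULL" test */
		fprintf(stderr, "no slot: error %ld\n", PTR_ERR(slot));
		return 1;
	}
	printf("got slot %p\n", slot);
	return 0;
}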
Line 2857... Line 2922...
2857
 
2922
 
2858
		if (reg->obj) {
2923
		if (reg->obj) {
Line 2859... Line 2924...
2859
			struct drm_i915_gem_object *old = reg->obj;
2924
			struct drm_i915_gem_object *old = reg->obj;
Line 3192... Line 3257...
3192
					    old_read_domains,
3257
					    old_read_domains,
3193
					    old_write_domain);
3258
					    old_write_domain);
Line 3194... Line 3259...
3194
 
3259
 
3195
	/* And bump the LRU for this access */
3260
	/* And bump the LRU for this access */
3196
	if (i915_gem_object_is_inactive(obj)) {
3261
	if (i915_gem_object_is_inactive(obj)) {
3197
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-
 
3198
							   &dev_priv->gtt.base);
3262
		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3199
		if (vma)
3263
		if (vma)
3200
			list_move_tail(&vma->mm_list,
3264
			list_move_tail(&vma->mm_list,
Line 3201... Line 3265...
3201
				       &dev_priv->gtt.base.inactive_list);
3265
				       &dev_priv->gtt.base.inactive_list);
Line 3564... Line 3628...
3564
	spin_unlock(&file_priv->mm.lock);
3628
	spin_unlock(&file_priv->mm.lock);
Line 3565... Line 3629...
3565
 
3629
 
3566
	if (seqno == 0)
3630
	if (seqno == 0)
Line 3567... Line 3631...
3567
		return 0;
3631
		return 0;
3568
 
3632
 
3569
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3633
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Line 3570... Line 3634...
3570
	if (ret == 0)
3634
	if (ret == 0)
3571
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3635
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Line 3668... Line 3732...
3668
			  args->handle);
3732
			  args->handle);
3669
		ret = -EINVAL;
3733
		ret = -EINVAL;
3670
		goto out;
3734
		goto out;
3671
	}
3735
	}
Line -... Line 3736...
-
 
3736
 
-
 
3737
	if (obj->user_pin_count == ULONG_MAX) {
-
 
3738
		ret = -EBUSY;
-
 
3739
		goto out;
-
 
3740
	}
3672
 
3741
 
3673
	if (obj->user_pin_count == 0) {
3742
	if (obj->user_pin_count == 0) {
3674
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3743
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3675
		if (ret)
3744
		if (ret)
3676
			goto out;
3745
			goto out;
Line 3821... Line 3890...
3821
void i915_gem_object_init(struct drm_i915_gem_object *obj,
3890
void i915_gem_object_init(struct drm_i915_gem_object *obj,
3822
			  const struct drm_i915_gem_object_ops *ops)
3891
			  const struct drm_i915_gem_object_ops *ops)
3823
{
3892
{
3824
	INIT_LIST_HEAD(&obj->global_list);
3893
	INIT_LIST_HEAD(&obj->global_list);
3825
	INIT_LIST_HEAD(&obj->ring_list);
3894
	INIT_LIST_HEAD(&obj->ring_list);
3826
	INIT_LIST_HEAD(&obj->exec_list);
-
 
3827
	INIT_LIST_HEAD(&obj->obj_exec_link);
3895
	INIT_LIST_HEAD(&obj->obj_exec_link);
3828
	INIT_LIST_HEAD(&obj->vma_list);
3896
	INIT_LIST_HEAD(&obj->vma_list);
Line 3829... Line 3897...
3829
 
3897
 
Line 3879... Line 3947...
3879
		 */
3947
		 */
3880
		obj->cache_level = I915_CACHE_LLC;
3948
		obj->cache_level = I915_CACHE_LLC;
3881
	} else
3949
	} else
3882
		obj->cache_level = I915_CACHE_NONE;
3950
		obj->cache_level = I915_CACHE_NONE;
Line 3883... Line 3951...
3883
 
3951
 
3884
	return obj;
-
 
Line 3885... Line -...
3885
}
-
 
3886
 
-
 
3887
int i915_gem_init_object(struct drm_gem_object *obj)
-
 
3888
{
-
 
3889
	BUG();
3952
	trace_i915_gem_object_create(obj);
3890
 
3953
 
Line 3891... Line 3954...
3891
	return 0;
3954
	return obj;
3892
}
3955
}
3893
 
3956
 
3894
void i915_gem_free_object(struct drm_gem_object *gem_obj)
3957
void i915_gem_free_object(struct drm_gem_object *gem_obj)
3895
{
3958
{
3896
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3959
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Line -... Line 3960...
-
 
3960
	struct drm_device *dev = obj->base.dev;
-
 
3961
	drm_i915_private_t *dev_priv = dev->dev_private;
3897
	struct drm_device *dev = obj->base.dev;
3962
	struct i915_vma *vma, *next;
Line 3898... Line 3963...
3898
	drm_i915_private_t *dev_priv = dev->dev_private;
3963
 
3899
	struct i915_vma *vma, *next;
3964
	intel_runtime_pm_get(dev_priv);
Line 3942... Line 4007...
3942
	drm_gem_object_release(&obj->base);
4007
	drm_gem_object_release(&obj->base);
3943
	i915_gem_info_remove_obj(dev_priv, obj->base.size);
4008
	i915_gem_info_remove_obj(dev_priv, obj->base.size);
Line 3944... Line 4009...
3944
 
4009
 
3945
	kfree(obj->bit_17);
4010
	kfree(obj->bit_17);
-
 
4011
	i915_gem_object_free(obj);
-
 
4012
 
-
 
4013
	intel_runtime_pm_put(dev_priv);
-
 
4014
}
-
 
4015
 
-
 
4016
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-
 
4017
				     struct i915_address_space *vm)
-
 
4018
{
-
 
4019
	struct i915_vma *vma;
-
 
4020
	list_for_each_entry(vma, &obj->vma_list, vma_link)
-
 
4021
		if (vma->vm == vm)
-
 
4022
			return vma;
-
 
4023
 
3946
	i915_gem_object_free(obj);
4024
	return NULL;
Line 3947... Line 4025...
3947
}
4025
}
3948
 
4026
 
3949
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4027
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
3950
				     struct i915_address_space *vm)
4028
				     struct i915_address_space *vm)
3951
{
4029
{
3952
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4030
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
Line 3966... Line 4044...
3966
		list_add_tail(&vma->vma_link, &obj->vma_list);
4044
		list_add_tail(&vma->vma_link, &obj->vma_list);
Line 3967... Line 4045...
3967
 
4045
 
3968
	return vma;
4046
	return vma;
Line -... Line 4047...
-
 
4047
}
-
 
4048
 
-
 
4049
struct i915_vma *
-
 
4050
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-
 
4051
				  struct i915_address_space *vm)
-
 
4052
{
-
 
4053
	struct i915_vma *vma;
-
 
4054
 
-
 
4055
	vma = i915_gem_obj_to_vma(obj, vm);
-
 
4056
	if (!vma)
-
 
4057
		vma = __i915_gem_vma_create(obj, vm);
-
 
4058
 
-
 
4059
	return vma;
3969
}
4060
}
3970
 
4061
 
3971
void i915_gem_vma_destroy(struct i915_vma *vma)
4062
void i915_gem_vma_destroy(struct i915_vma *vma)
-
 
4063
{
-
 
4064
	WARN_ON(vma->node.allocated);
-
 
4065
 
-
 
4066
	/* Keep the vma as a placeholder in the execbuffer reservation lists */
-
 
4067
	if (!list_empty(&vma->exec_list))
3972
{
4068
		return;
-
 
4069
 
3973
	WARN_ON(vma->node.allocated);
4070
	list_del(&vma->vma_link);
3974
	list_del(&vma->vma_link);
4071
 
Line 3975... Line 4072...
3975
	kfree(vma);
4072
	kfree(vma);
3976
}
4073
}
3977
 
4074
 
3978
#if 0
4075
#if 0
3979
int
4076
int
3980
i915_gem_idle(struct drm_device *dev)
4077
i915_gem_suspend(struct drm_device *dev)
Line 3981... Line 4078...
3981
{
4078
{
3982
	drm_i915_private_t *dev_priv = dev->dev_private;
4079
	drm_i915_private_t *dev_priv = dev->dev_private;
3983
	int ret;
4080
	int ret = 0;
3984
 
-
 
Line 3985... Line 4081...
3985
	if (dev_priv->ums.mm_suspended) {
4081
 
3986
		mutex_unlock(&dev->struct_mutex);
4082
	mutex_lock(&dev->struct_mutex);
3987
		return 0;
-
 
3988
	}
4083
	if (dev_priv->ums.mm_suspended)
3989
 
4084
		goto err;
3990
	ret = i915_gpu_idle(dev);
4085
 
Line 3991... Line 4086...
3991
	if (ret) {
4086
	ret = i915_gpu_idle(dev);
3992
		mutex_unlock(&dev->struct_mutex);
4087
	if (ret)
3993
		return ret;
4088
		goto err;
Line 3994... Line -...
3994
	}
-
 
3995
	i915_gem_retire_requests(dev);
-
 
3996
 
4089
 
3997
	/* Under UMS, be paranoid and evict. */
4090
	i915_gem_retire_requests(dev);
Line -... Line 4091...
-
 
4091
 
3998
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
4092
	/* Under UMS, be paranoid and evict. */
-
 
4093
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-
 
4094
		i915_gem_evict_everything(dev);
-
 
4095
 
-
 
4096
	i915_kernel_lost_context(dev);
-
 
4097
	i915_gem_cleanup_ringbuffer(dev);
-
 
4098
 
-
 
4099
	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
3999
		i915_gem_evict_everything(dev);
4100
	 * We need to replace this with a semaphore, or something.
-
 
4101
	 * And not confound ums.mm_suspended!
Line 4000... Line 4102...
4000
 
4102
	 */
-
 
4103
	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
-
 
4104
							     DRIVER_MODESET);
-
 
4105
	mutex_unlock(&dev->struct_mutex);
-
 
4106
 
4001
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4107
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4002
 
4108
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Line 4003... Line 4109...
4003
	i915_kernel_lost_context(dev);
4109
	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4004
	i915_gem_cleanup_ringbuffer(dev);
4110
 
-
 
4111
	return 0;
4005
 
4112
 
4006
	/* Cancel the retire work handler, which should be idle now. */
4113
err:
4007
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-
 
4008
 
-
 
4009
	return 0;
4114
	mutex_unlock(&dev->struct_mutex);
4010
}
4115
	return ret;
Line 4011... Line 4116...
4011
#endif
4116
}
4012
 
4117
#endif
Line 4013... Line -...
4013
void i915_gem_l3_remap(struct drm_device *dev)
-
 
4014
{
4118
 
-
 
4119
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4015
	drm_i915_private_t *dev_priv = dev->dev_private;
4120
{
Line -... Line 4121...
-
 
4121
	struct drm_device *dev = ring->dev;
-
 
4122
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
4123
	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
-
 
4124
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-
 
4125
	int i, ret;
4016
	u32 misccpctl;
4126
 
4017
	int i;
4127
	if (!HAS_L3_DPF(dev) || !remap_info)
4018
 
-
 
4019
	if (!HAS_L3_GPU_CACHE(dev))
-
 
4020
		return;
4128
		return 0;
4021
 
4129
 
4022
	if (!dev_priv->l3_parity.remap_info)
-
 
4023
		return;
-
 
4024
 
4130
	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
Line 4025... Line -...
4025
	misccpctl = I915_READ(GEN7_MISCCPCTL);
-
 
4026
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4131
	if (ret)
Line 4027... Line 4132...
4027
	POSTING_READ(GEN7_MISCCPCTL);
4132
		return ret;
4028
 
4133
 
Line 4029... Line 4134...
4029
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4134
	/*
4030
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4135
	 * Note: We do not worry about the concurrent register cacheline hang
4031
		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4136
	 * here because no other code should access these registers other than
Line 4059... Line 4164...
4059
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4164
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4060
	if (IS_GEN6(dev))
4165
	if (IS_GEN6(dev))
4061
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4166
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4062
	else if (IS_GEN7(dev))
4167
	else if (IS_GEN7(dev))
4063
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4168
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-
 
4169
	else if (IS_GEN8(dev))
-
 
4170
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4064
	else
4171
	else
4065
		BUG();
4172
		BUG();
4066
}
4173
}
Line 4067... Line 4174...
4067
 
4174
 
Line 4129... Line 4236...
4129
 
4236
 
4130
int
4237
int
4131
i915_gem_init_hw(struct drm_device *dev)
4238
i915_gem_init_hw(struct drm_device *dev)
4132
{
4239
{
4133
	drm_i915_private_t *dev_priv = dev->dev_private;
4240
	drm_i915_private_t *dev_priv = dev->dev_private;
Line 4134... Line 4241...
4134
	int ret;
4241
	int ret, i;
4135
 
4242
 
Line 4136... Line 4243...
4136
	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4243
	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4137
		return -EIO;
4244
		return -EIO;
Line -... Line 4245...
-
 
4245
 
-
 
4246
	if (dev_priv->ellc_size)
-
 
4247
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
-
 
4248
 
4138
 
4249
	if (IS_HASWELL(dev))
4139
	if (dev_priv->ellc_size)
4250
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4140
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4251
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4141
 
4252
 
4142
	if (HAS_PCH_NOP(dev)) {
4253
	if (HAS_PCH_NOP(dev)) {
Line 4143... Line -...
4143
		u32 temp = I915_READ(GEN7_MSG_CTL);
-
 
4144
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-
 
4145
		I915_WRITE(GEN7_MSG_CTL, temp);
4254
		u32 temp = I915_READ(GEN7_MSG_CTL);
Line 4146... Line 4255...
4146
	}
4255
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4147
 
4256
		I915_WRITE(GEN7_MSG_CTL, temp);
4148
	i915_gem_l3_remap(dev);
4257
	}
Line -... Line 4258...
-
 
4258
 
-
 
4259
	i915_gem_init_swizzling(dev);
-
 
4260
 
4149
 
4261
	ret = i915_gem_init_rings(dev);
4150
	i915_gem_init_swizzling(dev);
4262
	if (ret)
4151
 
4263
		return ret;
4152
	ret = i915_gem_init_rings(dev);
4264
 
4153
	if (ret)
4265
	for (i = 0; i < NUM_L3_SLICES(dev); i++)
-
 
4266
		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
-
 
4267
 
-
 
4268
	/*
-
 
4269
	 * XXX: There was some w/a described somewhere suggesting loading
-
 
4270
	 * contexts before PPGTT.
-
 
4271
	 */
4154
		return ret;
4272
	ret = i915_gem_context_init(dev);
4155
 
4273
	if (ret) {
4156
	/*
4274
		i915_gem_cleanup_ringbuffer(dev);
4157
	 * XXX: There was some w/a described somewhere suggesting loading
4275
		DRM_ERROR("Context initialization failed %d\n", ret);
4158
	 * contexts before PPGTT.
4276
		return ret;
Line 4253... Line 4371...
4253
 
4371
 
4254
int
4372
int
4255
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4373
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4256
		       struct drm_file *file_priv)
4374
		       struct drm_file *file_priv)
4257
{
-
 
4258
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4259
	int ret;
-
 
4260
 
4375
{
4261
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4376
	if (drm_core_check_feature(dev, DRIVER_MODESET))
Line 4262... Line 4377...
4262
		return 0;
4377
		return 0;
Line 4263... Line -...
4263
 
-
 
4264
	drm_irq_uninstall(dev);
4378
 
4265
 
-
 
4266
	mutex_lock(&dev->struct_mutex);
-
 
4267
	ret =  i915_gem_idle(dev);
-
 
4268
 
-
 
4269
	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-
 
4270
	 * We need to replace this with a semaphore, or something.
-
 
4271
	 * And not confound ums.mm_suspended!
-
 
4272
	 */
-
 
4273
	if (ret != 0)
-
 
4274
		dev_priv->ums.mm_suspended = 1;
-
 
4275
	mutex_unlock(&dev->struct_mutex);
4379
	drm_irq_uninstall(dev);
Line 4276... Line 4380...
4276
 
4380
 
4277
	return ret;
4381
	return i915_gem_suspend(dev);
4278
}
4382
}
4279
 
4383
 
Line 4280... Line 4384...
4280
void
4384
void
4281
i915_gem_lastclose(struct drm_device *dev)
4385
i915_gem_lastclose(struct drm_device *dev)
Line 4282... Line -...
4282
{
-
 
4283
	int ret;
4386
{
4284
 
4387
	int ret;
4285
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4388
 
4286
		return;
-
 
4287
 
4389
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4288
	mutex_lock(&dev->struct_mutex);
4390
		return;
Line 4289... Line 4391...
4289
	ret = i915_gem_idle(dev);
4391
 
4290
	if (ret)
4392
	ret = i915_gem_suspend(dev);
Line 4317... Line 4419...
4317
    int i;
4419
    int i;
Line 4318... Line 4420...
4318
 
4420
 
4319
	INIT_LIST_HEAD(&dev_priv->vm_list);
4421
	INIT_LIST_HEAD(&dev_priv->vm_list);
Line -... Line 4422...
-
 
4422
	i915_init_vm(dev_priv, &dev_priv->gtt.base);
4320
	i915_init_vm(dev_priv, &dev_priv->gtt.base);
4423
 
4321
 
4424
	INIT_LIST_HEAD(&dev_priv->context_list);
4322
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4425
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4323
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4426
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4324
    INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4427
    INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4325
    for (i = 0; i < I915_NUM_RINGS; i++)
4428
    for (i = 0; i < I915_NUM_RINGS; i++)
4326
        init_ring_lists(&dev_priv->ring[i]);
4429
        init_ring_lists(&dev_priv->ring[i]);
4327
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4430
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4328
        INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4431
        INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-
 
4432
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
-
 
4433
			  i915_gem_retire_work_handler);
4329
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4434
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
Line 4330... Line 4435...
4330
			  i915_gem_retire_work_handler);
4435
			  i915_gem_idle_work_handler);
4331
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4436
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4332
 
4437
 
Line 4368... Line 4473...
4368
	int ret;
4473
	int ret;
Line 4369... Line 4474...
4369
 
4474
 
4370
	if (dev_priv->mm.phys_objs[id - 1] || !size)
4475
	if (dev_priv->mm.phys_objs[id - 1] || !size)
Line 4371... Line 4476...
4371
		return 0;
4476
		return 0;
4372
 
4477
 
4373
	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4478
	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
Line 4374... Line 4479...
4374
	if (!phys_obj)
4479
	if (!phys_obj)
Line 4606... Line 4711...
4606
	return false;
4711
	return false;
4607
}
4712
}
Line 4608... Line 4713...
4608
 
4713
 
4609
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4714
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4610
{
-
 
4611
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4715
{
Line 4612... Line 4716...
4612
	struct i915_address_space *vm;
4716
	struct i915_vma *vma;
4613
 
4717
 
4614
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4718
	list_for_each_entry(vma, &o->vma_list, vma_link)
Line 4615... Line 4719...
4615
		if (i915_gem_obj_bound(o, vm))
4719
		if (drm_mm_node_allocated(&vma->node))
4616
			return true;
4720
			return true;
Line 4633... Line 4737...
4633
		if (vma->vm == vm)
4737
		if (vma->vm == vm)
4634
			return vma->node.size;
4738
			return vma->node.size;
Line 4635... Line 4739...
4635
 
4739
 
4636
	return 0;
4740
	return 0;
4637
}
-
 
4638
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-
 
4639
				     struct i915_address_space *vm)
-
 
4640
{
-
 
4641
	struct i915_vma *vma;
-
 
4642
	list_for_each_entry(vma, &obj->vma_list, vma_link)
-
 
4643
		if (vma->vm == vm)
-
 
Line 4644... Line -...
4644
			return vma;
-
 
4645
 
-
 
Line 4646... Line -...
4646
	return NULL;
-
 
4647
}
4741
}
4648
 
-
 
4649
struct i915_vma *
4742
 
4650
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4743
 
Line 4651... Line 4744...
4651
				  struct i915_address_space *vm)
4744
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
4652
{
4745
{
-
 
4746
	struct i915_vma *vma;
4653
	struct i915_vma *vma;
4747
 
-
 
4748
	if (WARN_ON(list_empty(&obj->vma_list)))
-
 
4749
	return NULL;
Line 4654... Line 4750...
4654
 
4750
 
4655
	vma = i915_gem_obj_to_vma(obj, vm);
4751
	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);