Subversion Repositories Kolibri OS


--- Rev 2342
+++ Rev 2344
@@ Rev 2342 line 34 / Rev 2344 line 34 @@
 //#include 
 #include 
 //#include 
 #include 
+
+extern int x86_clflush_size;
+
+#undef mb
+#undef rmb
+#undef wmb
+#define mb() asm volatile("mfence")
+#define rmb() asm volatile ("lfence")
+#define wmb() asm volatile ("sfence")
+
+static inline void clflush(volatile void *__p)
+{
+    asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
+}
 
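The memory-barrier macros and clflush() helper added in this hunk are what the cache flushing further down in i915_gem_clflush_object() is built on. As an illustration only, here is a minimal sketch of the usual flush pattern, assuming x86_clflush_size holds the CPU's cache-line stride; clflush_range() is a hypothetical helper, not part of this revision:

/* Hypothetical helper (illustration, not in the commit): flush a linearly
 * mapped buffer from the CPU caches one cache line at a time. */
static void clflush_range(void *addr, size_t size)
{
    char  *p = addr;
    size_t i;

    mb();                               /* order earlier stores           */
    for (i = 0; i < size; i += x86_clflush_size)
        clflush(p + i);                 /* evict one cache line           */
    mb();                               /* wait for the flushes to retire */
}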
 
@@ Rev 2342 line 54 / Rev 2344 line 67 @@
 static inline long PTR_ERR(const void *ptr)
 {
     return (long) ptr;
 }
+
+void
+drm_gem_object_free(struct kref *kref)
+{
+    struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+    struct drm_device *dev = obj->dev;
+
+    BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+    i915_gem_free_object(obj);
+}
 
 /**
  * Initialize an already allocated GEM object of the specified size with
@@ Rev 2342 line 65 / Rev 2344 line 88 @@
             struct drm_gem_object *obj, size_t size)
 {
     BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
     obj->dev = dev;
+    kref_init(&obj->refcount);
     atomic_set(&obj->handle_count, 0);
     obj->size = size;
 
     return 0;
 }
 
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{ }
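This hunk moves the GEM object over to kref-based reference counting: drm_gem_object_init() now seeds the count with kref_init(), and drm_gem_object_free() is the release callback that finally hands the object to i915_gem_free_object(). As a hedged sketch of the other half of that pattern (the helper below is assumed, not shown in this diff), dropping the last reference conventionally goes through kref_put():

/* Sketch only: conventional unreference path in DRM-style GEM code.
 * kref_put() calls drm_gem_object_free() once the count reaches zero;
 * the KolibriOS port may wire this up differently. */
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
    if (obj != NULL)
        kref_put(&obj->refcount, drm_gem_object_free);
}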
@@ Rev 2342 line 262 / Rev 2344 line 288 @@
 		return ret;
 	}
 
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference(&obj->base);
-//   trace_i915_gem_object_create(obj);
 
 	*handle_p = handle;
@@ Rev 2342 line 753 / Rev 2344 line 778 @@
 
 
 	return 0;
 
 err_pages:
-//   while (i--)
-//       page_cache_release(obj->pages[i]);
+    while (i--)
+        FreePage(obj->pages[i]);
 
@@ Rev 2342 line 767 / Rev 2344 line 792 @@
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
+    ENTER();
+
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
 //   if (obj->tiling_mode != I915_TILING_NONE)
 //       i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
-/*                                           It's a swap!!!
+
 	for (i = 0; i < page_count; i++) {
-		if (obj->dirty)
-			set_page_dirty(obj->pages[i]);
-
-		if (obj->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj->pages[i]);
-
-        //page_cache_release(obj->pages[i]);
+        FreePage(obj->pages[i]);
 	}
 	obj->dirty = 0;
-*/
 
     free(obj->pages);
+	obj->pages = NULL;
 
@@ Rev 2342 line 804 / Rev 2344 line 826 @@
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj->active) {
-//       drm_gem_object_reference(&obj->base);
+		drm_gem_object_reference(&obj->base);
 		obj->active = 1;
 	}
 
@@ Rev 2342 line 826 / Rev 2344 line 848 @@
 		reg = &dev_priv->fence_regs[obj->fence_reg];
 		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 	}
 }
+
+static void
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+{
+	list_del_init(&obj->ring_list);
+	obj->last_rendering_seqno = 0;
+}
+
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	BUG_ON(!obj->active);
+	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+	i915_gem_object_move_off_active(obj);
+}
+
+
+
+
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+	struct inode *inode;
+
+	/* Our goal here is to return as much of the memory as
+	 * is possible back to the system as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
+	 */
+
+	obj->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+	return obj->madv == I915_MADV_DONTNEED;
+}
 
 static void
@@ Rev 2342 line 846 / Rev 2344 line 909 @@
 			obj->base.write_domain = 0;
 			list_del_init(&obj->gpu_write_list);
 			i915_gem_object_move_to_active(obj, ring,
 						       i915_gem_next_request_seqno(ring));
 
-//			trace_i915_gem_object_change_domain(obj,
-//							    obj->base.read_domains,
-//							    old_write_domain);
 		}
 	}
@@ Rev 2342 line - / Rev 2344 line 951 @@
+
+
+
+
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	/* This function only exists to support waiting for existing rendering,
+	 * not for emitting required flushes.
+	 */
+	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
+
+	/* If there is rendering queued on the buffer being evicted, wait for
+	 * it.
+	 */
+	if (obj->active) {
+//		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+//		if (ret)
+//			return ret;
+	}
+
+	return 0;
+}
+
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+	u32 old_write_domain, old_read_domains;
+
+	/* Act a barrier for all accesses through the GTT */
+	mb();
+
+	/* Force a pagefault for domain tracking on next user access */
+//	i915_gem_release_mmap(obj);
+
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		return;
+
+	old_read_domains = obj->base.read_domains;
+	old_write_domain = obj->base.write_domain;
+	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	int ret = 0;
+
+    ENTER();
+	if (obj->gtt_space == NULL)
+		return 0;
+
+	if (obj->pin_count != 0) {
+		DRM_ERROR("Attempting to unbind pinned buffer\n");
+		return -EINVAL;
+	}
+
+	ret = i915_gem_object_finish_gpu(obj);
+	if (ret == -ERESTARTSYS)
+		return ret;
+	/* Continue on if we fail due to EIO, the GPU is hung so we
+	 * should be safe and we need to cleanup or else we might
+	 * cause memory corruption through use-after-free.
+	 */
+
+	i915_gem_object_finish_gtt(obj);
+
+	/* Move the object to the CPU domain to ensure that
+	 * any possible CPU writes while it's not in the GTT
+	 * are flushed when we go to remap it.
+	 */
+	if (ret == 0)
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret == -ERESTARTSYS)
+		return ret;
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	/* release the fence reg _after_ flushing */
+	ret = i915_gem_object_put_fence(obj);
+	if (ret == -ERESTARTSYS)
+		return ret;
+
+
+	i915_gem_gtt_unbind_object(obj);
+	i915_gem_object_put_pages_gtt(obj);
+
+	list_del_init(&obj->gtt_list);
+	list_del_init(&obj->mm_list);
+	/* Avoid an unnecessary call to unbind on rebind. */
+	obj->map_and_fenceable = true;
+
+	drm_mm_put_block(obj->gtt_space);
+	obj->gtt_space = NULL;
+	obj->gtt_offset = 0;
+
+	if (i915_gem_object_is_purgeable(obj))
+		i915_gem_object_truncate(obj);
+
+    LEAVE();
+	return ret;
+}
+
+int
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
+		    uint32_t invalidate_domains,
+		    uint32_t flush_domains)
+{
+	int ret;
+
+	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
+		return 0;
+
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	if (flush_domains & I915_GEM_GPU_DOMAINS)
+		i915_gem_process_flushing_list(ring, flush_domains);
+
@@ Rev 2342 line 943 / Rev 2344 line 1140 @@
 
 
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
-{
-	int ret;
-
-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
-	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
-	/* If there is rendering queued on the buffer being evicted, wait for
-	 * it.
-	 */
-//	if (obj->active) {
-//		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
-//		if (ret)
-//			return ret;
-//	}
-
-	return 0;
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+			    struct intel_ring_buffer *pipelined)
+{
+	int ret;
+
+	if (obj->fenced_gpu_access) {
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
+
+		obj->fenced_gpu_access = false;
+	}
+
+	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+		if (!ring_passed_seqno(obj->last_fenced_ring,
+				       obj->last_fenced_seqno)) {
+//           ret = i915_wait_request(obj->last_fenced_ring,
+//                       obj->last_fenced_seqno);
+//           if (ret)
+//               return ret;
+		}
+
+		obj->last_fenced_seqno = 0;
+		obj->last_fenced_ring = NULL;
+	}
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
+	return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+//   if (obj->tiling_mode)
+//       i915_gem_release_mmap(obj);
@@ Rev 2342 line 1012 / Rev 2344 line 1226 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 
 
 
 
 
@@ Rev 2342 line 1162 / Rev 2344 line 1390 @@
 		return ret;
 	}
 
 	ret = i915_gem_gtt_bind_object(obj);
 	if (ret) {
-//       i915_gem_object_put_pages_gtt(obj);
+        i915_gem_object_put_pages_gtt(obj);
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
 
@@ Rev 2342 line 1193 / Rev 2344 line 1421 @@
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
 
-//   trace_i915_gem_object_bind(obj, map_and_fenceable);
 	return 0;
 }
@@ Rev 2342 line 1218 / Rev 2344 line 1445 @@
 	 * tracking.
 	 */
 	if (obj->cache_level != I915_CACHE_NONE)
 		return;
 
-//   trace_i915_gem_object_clflush(obj);
-
-//   drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-     mb();
-     __asm__ ("wbinvd");   // this is really ugly
-     mb();
+     if(obj->mapped != NULL)
+     {
+        uint8_t *page_virtual;
+        unsigned int i;
+
+        page_virtual = obj->mapped;
+        asm volatile("mfence");
+        for (i = 0; i < obj->base.size; i += x86_clflush_size)
+            clflush(page_virtual + i);
+        asm volatile("mfence");
+     }
+     else
+     {
+        uint8_t *page_virtual;
+        unsigned int i;
+        page_virtual = AllocKernelSpace(obj->base.size);
+        if(page_virtual != NULL)
+        {
+            u32_t *src, *dst;
+            u32 count;
+
+#define page_tabs  0xFDC00000      /* really dirty hack */
+
+            src =  (u32_t*)obj->pages;
+            dst =  &((u32_t*)page_tabs)[(u32_t)page_virtual >> 12];
+            count = obj->base.size/4096;
+
+            while(count--)
+            {
+                *dst++ = (0xFFFFF000 & *src++) | 0x001 ;
+            };
+
+            asm volatile("mfence");
+            for (i = 0; i < obj->base.size; i += x86_clflush_size)
+                clflush(page_virtual + i);
+            asm volatile("mfence");
+            FreeKernelSpace(page_virtual);
+        }
+        else
+        {
+            asm volatile (
+            "mfence         \n"
+            "wbinvd         \n"                 /* this is really ugly  */
+            "mfence");
+        }
+     }
 }
 
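In the new i915_gem_clflush_object() above, when the object has no existing mapping the code reserves a kernel range with AllocKernelSpace(), maps the object's physical pages into it by writing x86 page-table entries directly through the hard-coded page_tabs window, clflushes that temporary mapping, and frees it again, falling back to wbinvd only if the allocation fails. A hedged sketch of just the mapping step, factored into a hypothetical helper (the names and the page_tabs constant come from the diff; the helper itself is illustrative only):

/* Illustrative only: the "really dirty hack" from the branch above.
 * Each entry of phys_pages holds a page-aligned physical address; writing
 * (addr & 0xFFFFF000) | 0x001 into the page table marks that frame Present
 * at the matching linear address inside the freshly reserved range. */
static void map_pages_into_range(void *linear, u32_t *phys_pages, u32_t size)
{
    u32_t *pte   = &((u32_t*)page_tabs)[(u32_t)linear >> 12];
    u32_t  count = size / 4096;

    while (count--)
        *pte++ = (0xFFFFF000 & *phys_pages++) | 0x001;
}

The diff then flushes the range with clflush() and releases it with FreeKernelSpace(), so the temporary mapping never outlives the flush.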
 
@@ Rev 2342 line 1237 / Rev 2344 line 1504 @@
 
 	/* Queue the GPU write cache flushing we need. */
 	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 }
 
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
+{
+	uint32_t old_write_domain;
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
+	 */
+	wmb();
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
@@ Rev 2342 line 1256 / Rev 2344 line 1542 @@
 	i915_gem_clflush_object(obj);
 	intel_gtt_chipset_flush();
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-//	trace_i915_gem_object_change_domain(obj,
-//					    obj->base.read_domains,
-//					    old_write_domain);
 }
 
 /**
@@ Rev 2342 line 1361 / Rev 2344 line 1644 @@
 		old_write_domain = obj->base.write_domain;
 
 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 
-		trace_i915_gem_object_change_domain(obj,
-						    old_read_domains,
-						    old_write_domain);
-	}
+    }
 
@@ Rev 2342 line 1431 / Rev 2344 line 1711 @@
 	 * the domain values for our changes.
 	 */
 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
-//   trace_i915_gem_object_change_domain(obj,
-//                       old_read_domains,
-//                       old_write_domain);
 
 	return 0;
+}
+
+int
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+		return 0;
+
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
+
+	/* Ensure that we invalidate the GPU's caches and TLBs. */
+	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
+	return i915_gem_object_wait_rendering(obj);
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+	uint32_t old_write_domain, old_read_domains;
+	int ret;
+
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return 0;
+
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we have a partially-valid cache of the object in the CPU,
+	 * finish invalidating it and free the per-page flags.
+	 */
+	i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+
+		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+
+	return 0;
+}
 
@@ Rev 2342 line 1493 / Rev 2344 line 1849 @@
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-//   WARN_ON(i915_verify_lists(dev));
 
 #if 0
 	if (obj->gtt_space != NULL) {
@@ Rev 2342 line 1527 / Rev 2344 line 1882 @@
 			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
 	obj->pin_mappable |= map_and_fenceable;
 
-//   WARN_ON(i915_verify_lists(dev));
 	return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	BUG_ON(obj->pin_count == 0);
+	BUG_ON(obj->gtt_space == NULL);
+
+	if (--obj->pin_count == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
@@ Rev 2342 line 1617 / Rev 2344 line 1985 @@
 	obj->map_and_fenceable = true;
 
 	return obj;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+
+	return 0;
+}
+
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+    ENTER();
+
+	ret = i915_gem_object_unbind(obj);
+	if (ret == -ERESTARTSYS) {
+		list_move(&obj->mm_list,
+			  &dev_priv->mm.deferred_free_list);
+		return;
+	}
+
+
+//	if (obj->base.map_list.map)
+//		drm_gem_free_mmap_offset(&obj->base);
+
+	drm_gem_object_release(&obj->base);
+	i915_gem_info_remove_obj(dev_priv, obj->base.size);
+
+	kfree(obj->page_cpu_valid);
+	kfree(obj->bit_17);
+	kfree(obj);
+    LEAVE();
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
@@ Rev 2342 line 1782 / Rev 2344 line 2189 @@
     INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
     for (i = 0; i < I915_NUM_RINGS; i++)
         init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
         INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-//    INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
-//              i915_gem_retire_work_handler);
-//    init_completion(&dev_priv->error_completion);
 
     /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
     if (IS_GEN3(dev)) {
         u32 tmp = I915_READ(MI_ARB_STATE);
@@ Rev 2342 line 1809 / Rev 2344 line 2213 @@
     for (i = 0; i < dev_priv->num_fence_regs; i++) {
         i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
     }
 
     i915_gem_detect_bit_6_swizzle(dev);
-//    init_waitqueue_head(&dev_priv->pending_flip_queue);
 
     dev_priv->mm.interruptible = true;