Subversion Repositories Kolibri OS


Rev 3255 → Rev 3260  ('-' lines exist only in Rev 3255, '+' lines only in Rev 3260)
Line 28... Line 28...
 #include 
 #include 
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include 
 #include 
 //#include 
 #include 
Line 36... Line 37...
 
Line 50... Line 51...
 
Line 51... Line 52...
 #define MAX_ERRNO       4095
Line 52... Line -...
-
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
-
-void
-drm_gem_object_free(struct kref *kref)
-{
-    struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-    struct drm_device *dev = obj->dev;
-
-    BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-    i915_gem_free_object(obj);
-}
-
-/**
- * Initialize an already allocated GEM object of the specified size with
- * shmfs backing store.
- */
-int drm_gem_object_init(struct drm_device *dev,
-            struct drm_gem_object *obj, size_t size)
-{
-    BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
-    obj->dev = dev;
-    kref_init(&obj->refcount);
-    atomic_set(&obj->handle_count, 0);
-    obj->size = size;
-
-    return 0;
-}
-
-void
Line 84... Line 53...
-drm_gem_object_release(struct drm_gem_object *obj)
-{ }
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
 
Line 136... Line 105...
 {
 	dev_priv->mm.object_count--;
 	dev_priv->mm.object_memory -= size;
 }
Line 140... Line -...
-
 
 static int
 i915_gem_wait_for_error(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
Line 279... Line 247...
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference(&obj->base);
 	trace_i915_gem_object_create(obj);
Line 282... Line 250...
 
-	*handle_p = handle;
-
+	*handle_p = handle;
 	return 0;
Line 286... Line 253...
 }
 
Line 315... Line 282...
 
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
Line 318... Line -...
 }
 
-#if 0
-
 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
Line 323... Line 288...
 {
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj->tiling_mode != I915_TILING_NONE;
 }
+#if 0
 
Line 648... Line 614...
 	unwritten = __copy_from_user_inatomic_nocache(vaddr,
 						      user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	return unwritten;
 }
+#endif
 
+#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
  */
Line 664... Line 632...
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length, ret;
+    char *vaddr;
Line 669... Line 638...
 
 	ret = i915_gem_object_pin(obj, 0, true, true);
 	if (ret)
Line 677... Line 646...
 
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
 		goto out_unpin;
+
+    vaddr = AllocKernelSpace(4096);
+    if(vaddr == NULL)
+    {
+        ret = -ENOSPC;
+        goto out_unpin;
+    };
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
Line 695... Line 671...
 		page_offset = offset_in_page(offset);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		/* If we get a fault while copying data, then (presumably) our
-		 * source page isn't available.  Return the error and we'll
-		 * retry in the slow path.
-		 */
-		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
-				    page_offset, user_data, page_length)) {
-			ret = -EFAULT;
-			goto out_unpin;
-		}
+        MapPage(vaddr, page_base, PG_SW|PG_NOCACHE);
+
+        memcpy(vaddr+page_offset, user_data, page_length);
 
 		remain -= page_length;
 		user_data += page_length;
 		offset += page_length;
 	}
 
+    FreeKernelSpace(vaddr);
+
 out_unpin:
 	i915_gem_object_unpin(obj);
 out:
+    printf("% s ret = %d\n", __FUNCTION__, ret);
+
 	return ret;
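
The new side of this hunk drops the upstream fast_user_write()/io_mapping path: Rev 3260 reserves a 4 KiB kernel window with AllocKernelSpace(), points it at each destination GTT page with MapPage(..., PG_SW|PG_NOCACHE) and copies with a plain memcpy(). A minimal sketch of that per-page pattern, assuming the KolibriOS helpers behave exactly as they are used above; the helper name gtt_write_page and its signature are hypothetical:

/* Hypothetical helper distilled from the Rev 3260 loop above. page_base is
 * whatever address MapPage() is handed for the destination GTT page, exactly
 * as in the hunk; PG_SW|PG_NOCACHE asks for a writable, uncached mapping. */
static int gtt_write_page(addr_t page_base, int page_offset,
                          const char *user_data, int page_length)
{
    char *vaddr = AllocKernelSpace(4096);      /* one 4 KiB kernel window */

    if (vaddr == NULL)
        return -ENOSPC;                        /* same error code the hunk uses */

    MapPage(vaddr, page_base, PG_SW | PG_NOCACHE);
    memcpy(vaddr + page_offset, user_data, page_length);

    FreeKernelSpace(vaddr);
    return 0;
}

The committed code allocates the window once before the copy loop and frees it after the loop; only the MapPage() remap is done per page.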
Line 728... Line 702...
 		  bool page_do_bit17_swizzling,
 		  bool needs_clflush_before,
 		  bool needs_clflush_after)
 {
 	char *vaddr;
-	int ret;
+	int ret = 0;
 
 	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
-	vaddr = kmap_atomic(page);
+	vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
 	if (needs_clflush_before)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
-	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+	memcpy(vaddr + shmem_page_offset,
 						user_data,
 						page_length);
 	if (needs_clflush_after)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
-	kunmap_atomic(vaddr);
+	FreeKernelSpace(vaddr);
 
 	return ret ? -EFAULT : 0;
 }
+#if 0
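
Rev 3260 also rewrites the kmap/copy-from-user sequence above in KolibriOS terms. A short sketch of how the Linux primitives in this hunk map onto the helpers the new code uses; the wrapper names kmap_page/kunmap_page are hypothetical and exist only to make the correspondence explicit:

/* Hypothetical wrappers showing the substitution made in this hunk. */
static inline void *kmap_page(struct page *page)
{
    /* kmap_atomic(page) becomes MapIoMem((addr_t)page, 4096, PG_SW);
     * the struct page pointer is cast to an address, which suggests it
     * carries the page's physical address in this port. */
    return MapIoMem((addr_t)page, 4096, PG_SW);
}

static inline void kunmap_page(void *vaddr)
{
    /* kunmap_atomic(vaddr) becomes FreeKernelSpace(vaddr). */
    FreeKernelSpace(vaddr);
}

/* __copy_from_user_inatomic_nocache(dst, src, len) becomes memcpy(dst, src, len):
 * with no separate user copy that can fault, ret is simply initialised to 0
 * instead of being set by the copy, which is why the hunk changes its declaration. */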
Line 781... Line 756...
 					     page_do_bit17_swizzling);
 	kunmap(page);
Line 783... Line 758...
 
 	return ret ? -EFAULT : 0;
 }
+#endif
+
 
 static int
 i915_gem_shmem_pwrite(struct drm_device *dev,
Line 858... Line 835...
 		/* If we don't overwrite a cacheline completely we need to be
 		 * careful to have up-to-date data by first clflushing. Don't
 		 * overcomplicate things and flush the entire patch. */
 		partial_cacheline_write = needs_clflush_before &&
 			((shmem_page_offset | page_length)
-				& (boot_cpu_data.x86_clflush_size - 1));
+				& (x86_clflush_size - 1));
 
 		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
Line 873... Line 850...
 		if (ret == 0)
 			goto next_page;
 
 		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
-		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
-					user_data, page_do_bit17_swizzling,
-					partial_cacheline_write,
-					needs_clflush_after);
+		dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
+
+//		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+//					user_data, page_do_bit17_swizzling,
+//					partial_cacheline_write,
+//					needs_clflush_after);
 
 		mutex_lock(&dev->struct_mutex);
 
 next_page:
-		set_page_dirty(page);
-		mark_page_accessed(page);
Line 929... Line 906...
 	int ret;
 
 	if (args->size == 0)
 		return 0;
 
-	if (!access_ok(VERIFY_READ,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size))
-		return -EFAULT;
-
-	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
-					   args->size);
-	if (ret)
-		return -EFAULT;
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
Line 973... Line 940...
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
-		goto out;
-	}
+//   if (obj->phys_obj) {
+//       ret = i915_gem_phys_pwrite(dev, obj, args, file);
+//       goto out;
+//   }
 
 	if (obj->cache_level == I915_CACHE_NONE &&
 	    obj->tiling_mode == I915_TILING_NONE &&
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
Line 997... Line 964...
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
Line 1001... Line -...
-
-#endif
 
 int
 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
 		     bool interruptible)
 {
Line 1121... Line 1086...
 		return -ETIME;
 	default: /* Completed */
 		WARN_ON(end < 0); /* We're not aware of other errors */
 		return 0;
 	}
+
 #endif
Line 1127... Line 1093...
 
 #define EXIT_COND \
     (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
Line 1193... Line 1159...
 	}
Line 1194... Line 1160...
 
 	return 0;
Line -... Line 1162...
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!dev_priv->mm.interruptible);
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_gem_check_wedge(dev_priv, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	mutex_unlock(&dev->struct_mutex);
+	ret = __wait_seqno(ring, seqno, true, NULL);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return ret;
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	struct drm_i915_gem_set_domain *args = data;
+	struct drm_i915_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
+	int ret;
+
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	if (read_domains & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Try to flush the object off the GPU without holding the lock.
+	 * We will repeat the flush holding the lock in the normal manner
+	 * to catch cases where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	if (ret)
+		goto unref;
+
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
+unref:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+
+
+
+
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
Line 1441... Line 1535...
 
 
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
     int page_count, i;
+	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
Line 1471... Line 1567...
 	 * at this point until we release them.
 	 *
 	 * Fail silently without starting the shrinker
 	 */
 	for_each_sg(st->sgl, sg, page_count, i) {
-        page = (struct page *)AllocPage(); // oh-oh
-        if ( page == 0 )
+		page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
+		if (IS_ERR(page)) {
+            dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
 			goto err_pages;
 
+		}
 		sg_set_page(sg, page, PAGE_SIZE, 0);
 	}
 
 	obj->pages = st;
 
-//    DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
+    DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
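
In i915_gem_object_get_pages_gtt() the backing pages now come from the object's shmem mapping rather than from bare AllocPage() calls (the old line even carried an "// oh-oh" note), and a failed lookup is logged before unwinding. A condensed sketch of the new per-index step, under the same assumptions as the hunk; get_object_page is a hypothetical name:

/* Hypothetical per-index helper mirroring the Rev 3260 loop above. */
static struct page *get_object_page(struct drm_i915_gem_object *obj,
                                    int i, gfp_t gfp)
{
    /* ask the object's shmem backing store for page i instead of
     * allocating an anonymous physical page with AllocPage() */
    struct page *page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);

    if (IS_ERR(page))
        dbgprintf("%s invalid page %p\n", __FUNCTION__, page);

    return page;   /* caller checks IS_ERR() and jumps to err_pages on failure */
}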
Line 1947... Line -...
-
-
 
 
 
 
 
Line 2819... Line 2915...
 	obj->cache_level = cache_level;
 	i915_gem_verify_gtt(dev);
 	return 0;
 }
Line -... Line 2919...
+
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->caching = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	switch (args->caching) {
+	case I915_CACHING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
 
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
Line 3143... Line 3301...
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
Line -... Line 3305...
 
+#endif
+
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
 {
Line 3180... Line 3340...
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
Line -... Line 3344...
 
+#if 0
 int
 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {