Subversion Repositories Kolibri OS


Comparing Rev 3746 (old) with Rev 4104 (new).

In the hunks below, a leading '-' marks a line present only in Rev 3746, a leading '+' marks a line present only in Rev 4104, and unmarked lines are common to both revisions. Each "Line X... Line Y..." heading gives the hunk's starting line number in Rev 3746 and Rev 4104 respectively.
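Read as a whole, the hunks appear to track an upstream i915 rework: a struct i915_address_space *vm parameter is threaded through the execbuffer reserve/relocate paths, direct obj->gtt_offset reads are replaced with the i915_gem_obj_ggtt_offset()/i915_gem_obj_offset(obj, vm) helpers, a VEBOX ring case is added to i915_gem_do_execbuffer(), and several KolibriOS-local ENTER()/FAIL() debug calls are dropped. This summary is inferred from the diff content itself.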
Line 33... Line 33...
 #include "intel_drv.h"
 //#include 
 
 #define I915_EXEC_SECURE        (1<<9)
 #define I915_EXEC_IS_PINNED     (1<<10)
+#define I915_EXEC_VEBOX         (4<<0)
 
 #define wmb() asm volatile ("sfence")
 
 struct drm_i915_gem_object *get_fb_obj();
 
-static inline __attribute__((const))
-bool is_power_of_2(unsigned long n)
-{
-    return (n != 0 && ((n & (n - 1)) == 0));
-}

Line 202... Line 198...
 }
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_objects *eb,
-				   struct drm_i915_gem_relocation_entry *reloc)
+				   struct drm_i915_gem_relocation_entry *reloc,
+				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;

Line 216... Line 213...
 	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
 	if (unlikely(target_obj == NULL))
 		return -ENOENT;
 
 	target_i915_obj = to_intel_bo(target_obj);
-	target_offset = target_i915_obj->gtt_offset;
+	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them

Line 306... Line 303...
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
 			return ret;
 
 		/* Map the page containing the relocation we're going to perform.  */
-		reloc->offset += obj->gtt_offset;
+        reloc->offset += i915_gem_obj_ggtt_offset(obj);
         reloc_page = (void*)MapIoMem(reloc->offset & PAGE_MASK, 4096, 3);
 		reloc_entry = (uint32_t __iomem *)
 			(reloc_page + (reloc->offset & ~PAGE_MASK));
 		iowrite32(reloc->delta, reloc_entry);

Line 322... Line 319...
 	return 0;
 }
 
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb)
+				    struct eb_objects *eb,
+				    struct i915_address_space *vm)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;

Line 345... Line 343...
         memcpy(r, user_relocs, count*sizeof(r[0]));
 
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
+								 vm);
 			if (ret)
 				return ret;

Line 365... Line 364...
 }
 
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs)
+					 struct drm_i915_gem_relocation_entry *relocs,
+					 struct i915_address_space *vm)
 {
 	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
+							 vm);
 		if (ret)
 			return ret;
 	}
 
 	return 0;
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb,
+			     struct i915_address_space *vm)

Line 394... Line 396...
 	 * acquire the struct mutex again. Obviously this is bad and so
 	 * lockdep complains vehemently.
 	 */
-//   pagefault_disable();
+//	pagefault_disable();
 	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb);
+		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
 		if (ret)
 			break;
 	}
 //   pagefault_enable();

Line 416... Line 418...
 }
 
 static int
 i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 				   struct intel_ring_buffer *ring,
+				   struct i915_address_space *vm,
 				   bool *need_reloc)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
 	int ret;
 
-//    ENTER();
-
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
 	need_mappable = need_fence || need_reloc_mappable(obj);
 
-	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+				  false);
 	if (ret)
-    {
-        FAIL();
 		return ret;
-    };
 
 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-            {
-                FAIL();

Line 465... Line 461...
 				       obj, obj->cache_level);
 
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != obj->gtt_offset) {
-	entry->offset = obj->gtt_offset;
+	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
+		entry->offset = i915_gem_obj_offset(obj, vm);
 		*need_reloc = true;
 	}

Line 487... Line 483...
 static void
 i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_gem_exec_object2 *entry;
 
-	if (!obj->gtt_space)
+	if (!i915_gem_obj_bound_any(obj))
 		return;
 

Line 504... Line 500...
 }
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *objects,
+			    struct i915_address_space *vm,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head ordered_objects;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-//    ENTER();
-
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {

Line 560... Line 555...
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(obj, objects, exec_list) {
 			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 			bool need_fence, need_mappable;
+			u32 obj_offset;
 
-			if (!obj->gtt_space)
+			if (!i915_gem_obj_bound(obj, vm))
 				continue;
 
+			obj_offset = i915_gem_obj_offset(obj, vm);
 			need_fence =
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
 			need_mappable = need_fence || need_reloc_mappable(obj);
 
-			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+			if ((entry->alignment &&
+			     obj_offset & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
-				ret = i915_gem_object_unbind(obj);
+				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 		/* Bind fresh objects */
 		list_for_each_entry(obj, objects, exec_list) {
-			if (obj->gtt_space)
+			if (i915_gem_obj_bound(obj, vm))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 err:		/* Decrement pin count for bound objects */
 		list_for_each_entry(obj, objects, exec_list)
 			i915_gem_execbuffer_unreserve_object(obj);
 
 		if (ret != -ENOSPC || retry++)

Line 611... Line 607...
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
 				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec)
+				  struct drm_i915_gem_exec_object2 *exec,
+				  struct i915_address_space *vm)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
 	bool need_relocs;
 	int *reloc_offset;

Line 695... Line 692...
 	ret = eb_lookup_objects(eb, exec, args, file);
 	if (ret)
 			goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
 	if (ret)
 		goto err;
 
 	list_for_each_entry(obj, &eb->objects, exec_list) {
 		int offset = obj->exec_entry - exec;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + reloc_offset[offset]);
+							       reloc + reloc_offset[offset],
+							       vm);
 		if (ret)
 			goto err;

Line 725... Line 723...
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
+	bool flush_chipset = false;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			i915_gem_clflush_object(obj);
+			flush_chipset |= i915_gem_clflush_object(obj, false);
 
 		flush_domains |= obj->base.write_domain;
 	}
 

Line 801... Line 800...
 	return 0;
 }
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
+				   struct i915_address_space *vm,
 				   struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;

Line 815... Line 815...
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
+		/* FIXME: This lookup gets fixed later <-- danvet */
+		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
 		i915_gem_object_move_to_active(obj, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
 			if (obj->pin_count) /* check for potential scanout */
-				intel_mark_fb_busy(obj);
+				intel_mark_fb_busy(obj, ring);
 		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
 
 static void
 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct drm_file *file,
-				    struct intel_ring_buffer *ring)
+				    struct intel_ring_buffer *ring,
+				    struct drm_i915_gem_object *obj)
 {
 	/* Unconditionally force add_request to emit a full flush. */
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)i915_add_request(ring, file, NULL);
+	(void)__i915_add_request(ring, file, obj, NULL);

Line 868... Line 871...
 
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
-		       struct drm_i915_gem_exec_object2 *exec)
+		       struct drm_i915_gem_exec_object2 *exec,
+		       struct i915_address_space *vm)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct eb_objects *eb;
 	struct drm_i915_gem_object *batch_obj;

Line 882... Line 886...
 	u32 mask, flags;
 	int ret, mode, i;
 	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args))
-	{
-        FAIL();
 		return -EINVAL;
-	}
 
 	ret = validate_exec_list(exec, args->buffer_count);
 	if (ret)
-    {
-        FAIL();
 		return ret;
-    };
 

Line 909... Line 907...
 	case I915_EXEC_RENDER:
 		ring = &dev_priv->ring[RCS];
 		break;
 	case I915_EXEC_BSD:
 		ring = &dev_priv->ring[VCS];
-		if (ctx_id != 0) {
+		if (ctx_id != DEFAULT_CONTEXT_ID) {
 			DRM_DEBUG("Ring %s doesn't support contexts\n",
 				  ring->name);
-            FAIL();
 			return -EPERM;
 		}
 		break;
 	case I915_EXEC_BLT:
 		ring = &dev_priv->ring[BCS];
-		if (ctx_id != 0) {
+		if (ctx_id != DEFAULT_CONTEXT_ID) {
 			DRM_DEBUG("Ring %s doesn't support contexts\n",
 				  ring->name);
 			return -EPERM;
 		}
 		break;
+	case I915_EXEC_VEBOX:
+		ring = &dev_priv->ring[VECS];
+		if (ctx_id != DEFAULT_CONTEXT_ID) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			return -EPERM;
+		}
+		break;
+
 	default:
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
 			  (int)(args->flags & I915_EXEC_RING_MASK));
 		return -EINVAL;
 	}

Line 1001... Line 1007...
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto pre_mutex_err;
 
-	if (dev_priv->mm.suspended) {
+	if (dev_priv->ums.mm_suspended) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;

Line 1026... Line 1032...
 			       struct drm_i915_gem_object,
 			       exec_list);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
 	if (need_relocs)
-		ret = i915_gem_execbuffer_relocate(eb);
+		ret = i915_gem_execbuffer_relocate(eb, vm);
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec);
+								eb, exec, vm);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)

Line 1087... Line 1093...
 		ret = i915_reset_gen7_sol_offsets(dev, ring);
 		if (ret)
 			goto err;
 	}
 
-	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_start = i915_gem_obj_offset(batch_obj, vm) +
+		args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
 

Line 1101... Line 1108...
 			goto err;
 	}
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-	i915_gem_execbuffer_move_to_active(&eb->objects, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring);
+	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 

Line 1120... Line 1127...
 
 int
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		     struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_execbuffer2 *args = data;
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret;
 
-//    ENTER();
-
 	if (args->buffer_count < 1 ||
 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
 		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
-        FAIL();
 		return -EINVAL;
 	}
 
 	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
 			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	if (exec2_list == NULL) {
 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
-        FAIL();
 		return -ENOMEM;
 	}

Line 1153... Line 1157...
         kfree(exec2_list);
         FAIL();
 		return -EFAULT;
 	}
 
-	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
+				     &dev_priv->gtt.base);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
 		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
 				   exec2_list,