Subversion Repositories Kolibri OS

Comparison of Rev 5078 with Rev 5271. Lines prefixed with '-' exist only in Rev 5078, lines prefixed with '+' only in Rev 5271; all other lines are common to both revisions. The "Line A... Line B..." rows mark where each hunk resumes in Rev 5078 and Rev 5271 respectively.
Line 35... Line 35...
 #include
 #include
 #include
 #include
 #include
+#include
 #include
-
-#define pr_err(fmt, ...) \
-        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#include
+#include
Line 44... Line 44...
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
Line 47... Line 47...
 #define TTM_BO_HASH_ORDER 13
+
 
 
Line 50... Line 51...
 
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+					  uint32_t *mem_type)
 {
 	int i;
 
 	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (flags & (1 << i)) {
+		if (place->flags & (1 << i)) {
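The hunk above is the core of this revision's refactor: placements are no longer passed around as bare uint32_t flag words but as struct ttm_place entries carrying fpfn/lpfn and flags. A minimal user-space sketch of the same lookup, using reduced stand-in definitions rather than the real ttm_placement.h ones (the body of the if, which the hunk cuts off, is assumed here to record the index and return 0):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; the real definitions live in ttm_placement.h. */
#define TTM_PL_SYSTEM   0
#define TTM_PL_TT       1
#define TTM_PL_VRAM     2
#define TTM_PL_PRIV5    7   /* highest memory-type bit the loop scans */

struct ttm_place {
	unsigned fpfn;      /* first acceptable page frame number */
	unsigned lpfn;      /* last acceptable page frame number, 0 = no limit */
	uint32_t flags;     /* memory-type bit plus caching/access flags */
};

/* Same idea as ttm_mem_type_from_place(): find the memory-type index
 * encoded in the low bits of place->flags.  Returns 0 on success. */
static int mem_type_from_place(const struct ttm_place *place, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1u << i)) {
			*mem_type = (uint32_t)i;
			return 0;
		}
	return -1;
}

int main(void)
{
	struct ttm_place place = { .fpfn = 0, .lpfn = 0, .flags = 1u << TTM_PL_VRAM };
	uint32_t mem_type;

	if (!mem_type_from_place(&place, &mem_type))
		printf("memory type index: %u\n", (unsigned)mem_type);
	return 0;
}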
Line 81... Line 82...
 	size_t acc_size = bo->acc_size;
Line 82... Line 83...
 
 	BUG_ON(atomic_read(&bo->list_kref.refcount));
 	BUG_ON(atomic_read(&bo->kref.refcount));
-	BUG_ON(atomic_read(&bo->cpu_writers));
-	BUG_ON(bo->sync_obj != NULL);
+	BUG_ON(atomic_read(&bo->cpu_writers));
 	BUG_ON(bo->mem.mm_node != NULL);
 	BUG_ON(!list_empty(&bo->lru));
Line 89... Line 89...
 	BUG_ON(!list_empty(&bo->ddestroy));
Line 341... Line 341...
 	ttm_bo_mem_put(bo, &bo->mem);
Line 342... Line 342...
 
 	ww_mutex_unlock (&bo->resv->lock);
Line -... Line 344...
+}
+
+static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
+{
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	int i;
+
+	fobj = reservation_object_get_list(bo->resv);
+	fence = reservation_object_get_excl(bo->resv);
+	if (fence && !fence->ops->signaled)
+		fence_enable_sw_signaling(fence);
+
+	for (i = 0; fobj && i < fobj->shared_count; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+					reservation_object_held(bo->resv));
+
+		if (!fence->ops->signaled)
+			fence_enable_sw_signaling(fence);
+	}
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
-	void *sync_obj = NULL;
Line 352... Line 370...
 	int put_count;
 	int ret;
Line 354... Line 372...
 
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
-	spin_lock(&bdev->fence_lock);
Line 359... Line 375...
-	(void) ttm_bo_wait(bo, false, false, true);
-	if (!ret && !bo->sync_obj) {
+	if (!ret) {
Line 361... Line 377...
-		spin_unlock(&bdev->fence_lock);
+		if (!ttm_bo_wait(bo, false, false, true)) {
Line 362... Line 378...
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
-		ttm_bo_cleanup_memtype_use(bo);
-
-		ttm_bo_list_ref_sub(bo, put_count, true);
-
Line 369... Line 381...
-		return;
-	}
-	if (bo->sync_obj)
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		ttm_bo_cleanup_memtype_use(bo);
+
+		ttm_bo_list_ref_sub(bo, put_count, true);
+
Line 389... Line 401...
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
Line 392... Line -...
-	spin_unlock(&glob->lru_lock);
-
-	if (sync_obj) {
-		driver->sync_obj_flush(sync_obj);
-		driver->sync_obj_unref(&sync_obj);
-	}
+	spin_unlock(&glob->lru_lock);
+
 //	schedule_delayed_work(&bdev->wq,
Line 399... Line 407...
 //			      ((HZ / 100) < 1) ? 1 : HZ / 100);
Line 413... Line 421...
 
 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 					  bool interruptible,
 					  bool no_wait_gpu)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
+{
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
Line 422... Line -...
-	int ret;
-
+	int ret;
Line 424... Line 429...
-	spin_lock(&bdev->fence_lock);
+
 	ret = ttm_bo_wait(bo, false, false, true);
-
-	if (ret && !no_wait_gpu) {
-		void *sync_obj;
-
-		/*
-		 * Take a reference to the fence and unreserve,
-		 * at this point the buffer should be dead, so
-		 * no new sync objects can be attached.
-		 */
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		spin_unlock(&bdev->fence_lock);
+
+	if (ret && !no_wait_gpu) {
Line 437... Line 433...
-
-		__ttm_bo_unreserve(bo);
+		long lret;
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 
-		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
-		driver->sync_obj_unref(&sync_obj);
-		if (ret)
-			return ret;
-
-		/*
-		 * remove sync_obj with ttm_bo_wait, the wait should be
-		 * finished, and no new wait object should have been added.
-		 */
-		spin_lock(&bdev->fence_lock);
-		ret = ttm_bo_wait(bo, false, false, true);
+		lret = reservation_object_wait_timeout_rcu(bo->resv,
+							   true,
+							   interruptible,
+							   30 * HZ);
+
Line 452... Line 442...
-		WARN_ON(ret);
-		spin_unlock(&bdev->fence_lock);
+		if (lret < 0)
+			return lret;
Line 454... Line 444...
-		if (ret)
+		else if (lret == 0)
Line 467... Line 457...
 		 */
 		if (ret) {
 			spin_unlock(&glob->lru_lock);
 			return 0;
 		}
-	} else
-		spin_unlock(&bdev->fence_lock);
+
+		/*
+		 * remove sync_obj with ttm_bo_wait, the wait should be
+		 * finished, and no new wait object should have been added.
+		 */
+		ret = ttm_bo_wait(bo, false, false, true);
+		WARN_ON(ret);
+	}
Line 474... Line 470...
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
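In the rewritten ttm_bo_cleanup_refs_and_unlock() above, the driver sync_obj wait is replaced by reservation_object_wait_timeout_rcu() with a 30-second budget; the convention is that a negative result is an error, zero means the timeout expired, and a positive value is the time remaining. The hunk shows "if (lret < 0)" and "else if (lret == 0)", but the body of the second branch lies outside the displayed lines; the sketch below assumes it maps to -EBUSY, with a hypothetical wait_all_fences() standing in for the kernel helper:

#include <stdio.h>

#define EBUSY 16

/* Hypothetical stand-in for reservation_object_wait_timeout_rcu():
 * < 0 -> error, 0 -> timed out, > 0 -> time left when all fences signaled. */
static long wait_all_fences(long timeout_jiffies)
{
	/* pretend the fences signaled with half of the budget left */
	return timeout_jiffies / 2;
}

static int cleanup_wait(long timeout_jiffies)
{
	long lret = wait_all_fences(timeout_jiffies);

	if (lret < 0)
		return (int)lret;    /* propagate the error */
	else if (lret == 0)
		return -EBUSY;       /* timed out, buffer still busy (assumed mapping) */

	return 0;                    /* all fences signaled */
}

int main(void)
{
	printf("cleanup_wait: %d\n", cleanup_wait(30 * 100 /* ~30 s at HZ=100 */));
	return 0;
}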
Line 599... Line 595...
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
-					struct ttm_placement *placement,
+					const struct ttm_place *place,
 					struct ttm_mem_reg *mem,
 					bool interruptible,
 					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;
Line 612... Line 608...
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
+		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
 			break;
Line 652... Line 648...
 	return result;
 }
Line 654... Line 650...
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 uint32_t mem_type,
-				 uint32_t proposed_placement,
+				 const struct ttm_place *place,
 				 uint32_t *masked_placement)
 {
Line 660... Line 656...
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
Line 662... Line 658...
-	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
 		return false;
Line 664... Line 660...
 
Line 665... Line 661...
-	if ((proposed_placement & man->available_caching) == 0)
+	if ((place->flags & man->available_caching) == 0)
 		return false;
 
Line 694... Line 690...
 	bool has_erestartsys = false;
 	int i, ret;
Line 696... Line 692...
 
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
-						&mem_type);
+		const struct ttm_place *place = &placement->placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
Line 703... Line 700...
 		man = &bdev->man[mem_type];
-
-		type_ok = ttm_bo_mt_compatible(man,
-						mem_type,
+
Line 707... Line 702...
-						placement->placement[i],
+		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 						&cur_flags);
Line 714... Line 709...
 						  cur_flags);
 		/*
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
Line 721... Line 716...
 
 		if (mem_type == TTM_PL_SYSTEM)
Line 723... Line 718...
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement,
-						     cur_flags, mem);
+			ret = (*man->func->get_node)(man, bo, place, mem);
 			if (unlikely(ret))
 				return ret;
 		}
Line 741... Line 735...
 
 	if (!type_found)
Line 743... Line 737...
 		return -EINVAL;
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
-						&mem_type);
+		const struct ttm_place *place = &placement->busy_placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 		if (!man->has_type)
 			continue;
-		if (!ttm_bo_mt_compatible(man,
-						mem_type,
-						placement->busy_placement[i],
Line 756... Line 748...
-						&cur_flags))
+		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
Line 764... Line 756...
 		 */
-		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM) {
 			mem->mem_type = mem_type;
Line 770... Line 762...
 			mem->placement = cur_flags;
 			mem->mm_node = NULL;
 			return 0;
 		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
Line 791... Line 783...
 			bool interruptible,
 			bool no_wait_gpu)
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
-	struct ttm_bo_device *bdev = bo->bdev;
Line 797... Line 788...
 
Line 798... Line 789...
 	lockdep_assert_held(&bo->resv->lock.base);
 
 	/*
 	 * FIXME: It's possible to pipeline buffer moves.
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
Line 831... Line 820...
 			      struct ttm_mem_reg *mem,
 			      uint32_t *new_flags)
 {
 	int i;
Line 835... Line -...
-
-	if (mem->mm_node && placement->lpfn != 0 &&
-	    (mem->start < placement->fpfn ||
-	     mem->start + mem->num_pages > placement->lpfn))
-		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		*new_flags = placement->placement[i];
+		const struct ttm_place *heap = &placement->placement[i];
+
+		if (mem->mm_node &&
+		    (mem->start < heap->fpfn ||
+		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
Line 846... Line 836...
 	}
 
 	for (i = 0; i < placement->num_busy_placement; i++) {
-		*new_flags = placement->busy_placement[i];
+		const struct ttm_place *heap = &placement->busy_placement[i];
+
+		if (mem->mm_node &&
+		    (mem->start < heap->fpfn ||
+		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
Line 862... Line 858...
 {
 	int ret;
 	uint32_t new_flags;
Line 865... Line 861...
 
 	lockdep_assert_held(&bo->resv->lock.base);
-	/* Check that range is valid */
-	if (placement->lpfn || placement->fpfn)
-		if (placement->fpfn > placement->lpfn ||
-			(placement->lpfn - placement->fpfn) < bo->num_pages)
-			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
Line 895... Line 886...
 	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_validate);
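Together with the ttm_bo_mem_compat() hunk further up, the deleted range check here means fpfn/lpfn validation is now done per placement entry rather than once per ttm_placement. A small compilable sketch of that per-entry test, using reduced stand-in structs (place_fits() is the positive form of the "continue" condition in the new loop):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced stand-ins for the structures used by the check. */
struct place { unsigned fpfn, lpfn; uint32_t flags; };
struct mem_reg { unsigned long start, num_pages; uint32_t placement; };

/* Per-entry range test as in the new ttm_bo_mem_compat(): an entry is
 * skipped when the buffer lies outside that entry's [fpfn, lpfn) window
 * (lpfn == 0 means "no upper limit"). */
static bool place_fits(const struct place *heap, const struct mem_reg *mem)
{
	if (mem->start < heap->fpfn)
		return false;
	if (heap->lpfn != 0 && mem->start + mem->num_pages > heap->lpfn)
		return false;
	return true;
}

int main(void)
{
	struct mem_reg mem = { .start = 256, .num_pages = 64 };
	struct place lower_half = { .fpfn = 0, .lpfn = 256 };
	struct place anywhere   = { .fpfn = 0, .lpfn = 0 };

	printf("lower_half: %d, anywhere: %d\n",
	       place_fits(&lower_half, &mem), place_fits(&anywhere, &mem));
	return 0;
}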
Line 899... Line -...
-
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-				struct ttm_placement *placement)
-{
-	BUG_ON((placement->fpfn || placement->lpfn) &&
-	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
-	return 0;
-}
 
 int ttm_bo_init(struct ttm_bo_device *bdev,
-		struct ttm_buffer_object *bo,
-		unsigned long size,
-		enum ttm_bo_type type,
-		struct ttm_placement *placement,
-		uint32_t page_alignment,
-		bool interruptible,
-		struct file *persistent_swap_storage,
-		size_t acc_size,
-		struct sg_table *sg,
-		void (*destroy) (struct ttm_buffer_object *))
+        struct ttm_buffer_object *bo,
+        unsigned long size,
+        enum ttm_bo_type type,
+        struct ttm_placement *placement,
+        uint32_t page_alignment,
+        bool interruptible,
+        struct file *persistent_swap_storage,
+        size_t acc_size,
+        struct sg_table *sg,
+		struct reservation_object *resv,
+        void (*destroy) (struct ttm_buffer_object *))
 {
-	int ret = 0;
-	unsigned long num_pages;
+    int ret = 0;
+    unsigned long num_pages;
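The signature above gains a struct reservation_object *resv parameter, and the hunk that follows adds the branch that either adopts a caller-provided (already locked) reservation object or falls back to the object's embedded ttm_resv. A toy sketch of that choice, with a boolean standing in for the ww_mutex state (bo_init_resv() and resv_obj are illustrative names, not TTM API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: a "reservation object" is just a lock flag here. */
struct resv_obj { bool locked; };

struct bo {
	struct resv_obj *resv;      /* what the object actually uses */
	struct resv_obj  ttm_resv;  /* embedded fallback, as in ttm_buffer_object */
};

/* Mirrors the branch added to ttm_bo_init(): a caller-provided resv is
 * adopted as-is (and is expected to be locked already); otherwise the
 * embedded one is initialized and locked here. */
static void bo_init_resv(struct bo *bo, struct resv_obj *resv)
{
	if (resv) {
		bo->resv = resv;            /* caller is responsible for locking */
	} else {
		bo->resv = &bo->ttm_resv;
		bo->ttm_resv.locked = false;
		bo->resv->locked = true;    /* stands in for ww_mutex_trylock() */
	}
}

int main(void)
{
	struct bo a = {0}, b = {0};
	struct resv_obj shared = { .locked = true };

	bo_init_resv(&a, &shared);  /* imported, pre-locked reservation */
	bo_init_resv(&b, NULL);     /* embedded reservation */
	printf("a uses shared: %d, b locked: %d\n", a.resv == &shared, b.resv->locked);
	return 0;
}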
Line 955... Line 938...
-	bo->priv_flags = 0;
-	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
-	bo->persistent_swap_storage = persistent_swap_storage;
-	bo->acc_size = acc_size;
-	bo->sg = sg;
-	bo->resv = &bo->ttm_resv;
-	reservation_object_init(bo->resv);
+    bo->priv_flags = 0;
+    bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+    bo->persistent_swap_storage = persistent_swap_storage;
+    bo->acc_size = acc_size;
+    bo->sg = sg;
+	if (resv) {
+		bo->resv = resv;
+		lockdep_assert_held(&bo->resv->lock.base);
+	} else {
+		bo->resv = &bo->ttm_resv;
+		reservation_object_init(&bo->ttm_resv);
+	}
 	atomic_inc(&bo->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
+    drm_vma_node_reset(&bo->vma_node);
Line 964... Line -...
-
-	ret = ttm_bo_check_placement(bo, placement);
 
-	/*
-	 * For ttm_bo_type_device buffers, allocate
-	 * address space from the device.
-	 */
-	if (likely(!ret) &&
-	    (bo->type == ttm_bo_type_device ||
-	     bo->type == ttm_bo_type_sg))
+    /*
+     * For ttm_bo_type_device buffers, allocate
+     * address space from the device.
+     */
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
Line -... Line 960...
 					 bo->mem.num_pages);
 
+	/* passed reservation objects should already be locked,
+	 * since otherwise lockdep will be angered in radeon.
+	 */
+	if (!resv) {
Line 977... Line 967...
-	locked = ww_mutex_trylock(&bo->resv->lock);
-    WARN_ON(!locked);
+		locked = ww_mutex_trylock(&bo->resv->lock);
+		WARN_ON(!locked);
+	}
Line -... Line 969...
 
 	if (likely(!ret))
Line 980... Line 971...
 		ret = ttm_bo_validate(bo, placement, interruptible, false);
+
Line 1116... Line 1107...
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 	bdev->val_seq = 0;
-	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
Line 1125... Line 1115...
 
Line 1169... Line 1159...
 }
Line 1170... Line 1160...
 
Line 1171... Line -...
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
-	struct ttm_bo_driver *driver = bo->bdev->driver;
-	struct ttm_bo_device *bdev = bo->bdev;
-	void *sync_obj;
-	int ret = 0;
+	struct reservation_object_list *fobj;
+	struct reservation_object *resv;
Line -... Line 1169...
+	struct fence *excl;
+	long timeout = 15 * HZ;
+	int i;
 
+	resv = bo->resv;
+	fobj = reservation_object_get_list(resv);
+	excl = reservation_object_get_excl(resv);
Line 1183... Line -...
-	if (likely(bo->sync_obj == NULL))
-		return 0;
-
-	while (bo->sync_obj) {
-
-		if (driver->sync_obj_signaled(bo->sync_obj)) {
-			void *tmp_obj = bo->sync_obj;
-			bo->sync_obj = NULL;
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+	if (excl) {
+		if (!fence_is_signaled(excl)) {
+			if (no_wait)
+				return -EBUSY;
Line -... Line 1180...
+
+			timeout = fence_wait_timeout(excl,
+						     interruptible, timeout);
+		}
+	}
+
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&tmp_obj);
+	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
+		struct fence *fence;
Line 1194... Line -...
-			spin_lock(&bdev->fence_lock);
-			continue;
-		}
-
-		if (no_wait)
-			return -EBUSY;
-
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		spin_unlock(&bdev->fence_lock);
-		ret = driver->sync_obj_wait(sync_obj,
-					    lazy, interruptible);
-		if (unlikely(ret != 0)) {
-			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bdev->fence_lock);
-	return ret;
-		}
-		spin_lock(&bdev->fence_lock);
-		if (likely(bo->sync_obj == sync_obj)) {
-			void *tmp_obj = bo->sync_obj;
-			bo->sync_obj = NULL;
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
-				  &bo->priv_flags);
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&sync_obj);
-			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bdev->fence_lock);
-		} else {
-			spin_unlock(&bdev->fence_lock);
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(resv));
+
+		if (!fence_is_signaled(fence)) {
+			if (no_wait)
+				return -EBUSY;
+
+			timeout = fence_wait_timeout(fence,
+						     interruptible, timeout);
+		}
+	}
+
+	if (timeout < 0)
+		return timeout;
+
+	if (timeout == 0)
Line -... Line 1204...
+			return -EBUSY;
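The truncated hunk above is the ttm_bo_wait() rewrite: instead of looping on a single driver sync_obj under fence_lock, it waits on the reservation object's exclusive fence and then on each shared fence, threading one shrinking timeout budget through every wait. A stand-alone sketch of that budget-carrying loop, with a hypothetical wait_fence() in place of fence_wait_timeout():

#include <stdio.h>

#define EBUSY 16

/* Hypothetical stand-in for fence_wait_timeout(): returns the time left
 * (> 0), 0 if the budget ran out, or a negative error code. */
static long wait_fence(int fence_id, long timeout)
{
	long cost = 10;                    /* pretend each fence takes 10 ticks */

	(void)fence_id;
	return timeout > cost ? timeout - cost : 0;
}

/* One timeout budget is threaded through every wait, as in the new
 * ttm_bo_wait(): exhausting it means the buffer is still busy. */
static int wait_all(const int *fences, int count, long timeout)
{
	int i;

	for (i = 0; i < count && timeout > 0; i++) {
		timeout = wait_fence(fences[i], timeout);
		if (timeout < 0)
			return (int)timeout;   /* propagate errors */
	}

	if (timeout == 0)
		return -EBUSY;                 /* ran out of time */
	return 0;
}

int main(void)
{
	int fences[] = { 1, 2, 3 };

	printf("wait_all: %d\n", wait_all(fences, 3, 15 * 100 /* ~15 s at HZ=100 */));
	return 0;
}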