Subversion Repositories Kolibri OS


Rev 5078 → Rev 5271
(lines prefixed with '-' exist only in Rev 5078, lines prefixed with '+' only in Rev 5271; unprefixed lines are common to both)
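Orientation note: judging from the hunks below, this revision appears to do two things — switch the placement API from raw uint32_t flag words to per-entry struct ttm_place arguments (ttm_mem_type_from_flags → ttm_mem_type_from_place, with matching changes in ttm_bo_mem_space, ttm_bo_mem_compat and the get_node callbacks), and drop the driver sync_obj machinery in favour of reservation_object/fence waits (ttm_bo_wait, ttm_bo_cleanup_refs_*, ttm_bo_flush_all_fences). The sketch below is illustrative only; the struct layout is inferred from the field accesses visible in the new code (place->flags, heap->fpfn, heap->lpfn), not copied from the KolibriOS headers.

/*
 * Minimal orientation sketch, not code from either revision: the shape of a
 * placement entry as implied by the hunks below. The real definition lives
 * in the TTM placement header.
 */
#include <stdint.h>

struct ttm_place {
	uint32_t fpfn;	/* first acceptable page frame number */
	uint32_t lpfn;	/* last acceptable page frame number; 0 means no upper limit */
	uint32_t flags;	/* TTM_PL_FLAG_* mask; in Rev 5078 this word was the whole entry */
};

/*
 * Rev 5078:  ttm_mem_type_from_flags(placement->placement[i], &mem_type);
 * Rev 5271:  ttm_mem_type_from_place(&placement->placement[i], &mem_type);
 */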
Line 35... Line 35...

 #include
 #include
 #include
 #include
 #include
+#include
 #include
-
-#define pr_err(fmt, ...) \
-        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#include
+#include
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
 #define TTM_BO_HASH_ORDER 13
+
 
 
 
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+					  uint32_t *mem_type)
 {
 	int i;
 
 	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (flags & (1 << i)) {
+		if (place->flags & (1 << i)) {

Line 81... Line 82...

 	size_t acc_size = bo->acc_size;
 
 	BUG_ON(atomic_read(&bo->list_kref.refcount));
 	BUG_ON(atomic_read(&bo->kref.refcount));
 	BUG_ON(atomic_read(&bo->cpu_writers));
-	BUG_ON(bo->sync_obj != NULL);
 	BUG_ON(bo->mem.mm_node != NULL);
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));

Line 151... Line 151...

 }
 
 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 			 bool never_free)
 {
-//   kref_sub(&bo->list_kref, count,
-//        (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+//	kref_sub(&bo->list_kref, count,
+//		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
 }
 
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)

Line 291... Line 291...

 	}
 
 moved:
 	if (bo->evicted) {
 		if (bdev->driver->invalidate_caches) {
-		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-		if (ret)
-			pr_err("Can not flush read caches\n");
+			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+			if (ret)
+				pr_err("Can not flush read caches\n");
 		}
 		bo->evicted = false;
 	}

Line 341... Line 341...

 	ttm_bo_mem_put(bo, &bo->mem);
 
 	ww_mutex_unlock (&bo->resv->lock);
+}
+
+static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
+{
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	int i;
+
+	fobj = reservation_object_get_list(bo->resv);
+	fence = reservation_object_get_excl(bo->resv);
+	if (fence && !fence->ops->signaled)
+		fence_enable_sw_signaling(fence);
+
+	for (i = 0; fobj && i < fobj->shared_count; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+					reservation_object_held(bo->resv));
+
+		if (!fence->ops->signaled)
+			fence_enable_sw_signaling(fence);
+	}
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
-	void *sync_obj = NULL;
 	int put_count;
 	int ret;
 
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
-	spin_lock(&bdev->fence_lock);
-	(void) ttm_bo_wait(bo, false, false, true);
-	if (!ret && !bo->sync_obj) {
-		spin_unlock(&bdev->fence_lock);
+	if (!ret) {
+		if (!ttm_bo_wait(bo, false, false, true)) {
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
 		ttm_bo_list_ref_sub(bo, put_count, true);
 
-		return;
-	}
-	if (bo->sync_obj)
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);

Line 389... Line 401...

 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 
-	if (sync_obj) {
-		driver->sync_obj_flush(sync_obj);
-		driver->sync_obj_unref(&sync_obj);
-	}
 //	schedule_delayed_work(&bdev->wq,
 //			      ((HZ / 100) < 1) ? 1 : HZ / 100);

Line 413... Line 421...

 
 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 					  bool interruptible,
 					  bool no_wait_gpu)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret;
 
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, true);
 
 	if (ret && !no_wait_gpu) {
-		void *sync_obj;
-
-		/*
-		 * Take a reference to the fence and unreserve,
-		 * at this point the buffer should be dead, so
-		 * no new sync objects can be attached.
-		 */
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		spin_unlock(&bdev->fence_lock);
-
-		__ttm_bo_unreserve(bo);
+		long lret;
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 
-		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
-		driver->sync_obj_unref(&sync_obj);
-		if (ret)
-			return ret;
-
-		/*
-		 * remove sync_obj with ttm_bo_wait, the wait should be
-		 * finished, and no new wait object should have been added.
-		 */
-		spin_lock(&bdev->fence_lock);
-		ret = ttm_bo_wait(bo, false, false, true);
-		WARN_ON(ret);
-		spin_unlock(&bdev->fence_lock);
-		if (ret)
+		lret = reservation_object_wait_timeout_rcu(bo->resv,
+							   true,
+							   interruptible,
+							   30 * HZ);
+
+		if (lret < 0)
+			return lret;
+		else if (lret == 0)

Line 467... Line 457...

 		 */
 		if (ret) {
 			spin_unlock(&glob->lru_lock);
 			return 0;
 		}
-	} else
-		spin_unlock(&bdev->fence_lock);
+
+		/*
+		 * remove sync_obj with ttm_bo_wait, the wait should be
+		 * finished, and no new wait object should have been added.
+		 */
+		ret = ttm_bo_wait(bo, false, false, true);
+		WARN_ON(ret);
+	}
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);

Line 599... Line 595...

  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
-					struct ttm_placement *placement,
+					const struct ttm_place *place,
 					struct ttm_mem_reg *mem,
 					bool interruptible,
 					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
+		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
 			break;

Line 652... Line 648...

 	return result;
 }
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 uint32_t mem_type,
-				 uint32_t proposed_placement,
+				 const struct ttm_place *place,
 				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
 		return false;
 
-	if ((proposed_placement & man->available_caching) == 0)
+	if ((place->flags & man->available_caching) == 0)
 		return false;
 

Line 694... Line 690...

 	bool has_erestartsys = false;
 	int i, ret;
 
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
-						&mem_type);
+		const struct ttm_place *place = &placement->placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 
-		type_ok = ttm_bo_mt_compatible(man,
-						mem_type,
-						placement->placement[i],
-						&cur_flags);
+		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
+						&cur_flags);

Line 714... Line 709...

 						  cur_flags);
 		/*
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement,
-						     cur_flags, mem);
+			ret = (*man->func->get_node)(man, bo, place, mem);
 			if (unlikely(ret))
 				return ret;
 		}

Line 741... Line 735...

 
 	if (!type_found)
 		return -EINVAL;
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
-						&mem_type);
+		const struct ttm_place *place = &placement->busy_placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 		if (!man->has_type)
 			continue;
-		if (!ttm_bo_mt_compatible(man,
-						mem_type,
-						placement->busy_placement[i],
-						&cur_flags))
+		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM) {
 			mem->mem_type = mem_type;
 			mem->placement = cur_flags;
 			mem->mm_node = NULL;
 			return 0;
 		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,

Line 791... Line 783...

 			bool interruptible,
 			bool no_wait_gpu)
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
-	struct ttm_bo_device *bdev = bo->bdev;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
 	/*
 	 * FIXME: It's possible to pipeline buffer moves.
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;

Line 831... Line 820...

 			      struct ttm_mem_reg *mem,
 			      uint32_t *new_flags)
 {
 	int i;
 
-	if (mem->mm_node && placement->lpfn != 0 &&
-	    (mem->start < placement->fpfn ||
-	     mem->start + mem->num_pages > placement->lpfn))
-		return false;
-
 	for (i = 0; i < placement->num_placement; i++) {
-		*new_flags = placement->placement[i];
+		const struct ttm_place *heap = &placement->placement[i];
+		if (mem->mm_node &&
+		    (mem->start < heap->fpfn ||
+		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; i++) {
-		*new_flags = placement->busy_placement[i];
+		const struct ttm_place *heap = &placement->busy_placement[i];
+		if (mem->mm_node &&
+		    (mem->start < heap->fpfn ||
+		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))

Line 862... Line 858...

 {
 	int ret;
 	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
-	/* Check that range is valid */
-	if (placement->lpfn || placement->fpfn)
-		if (placement->fpfn > placement->lpfn ||
-			(placement->lpfn - placement->fpfn) < bo->num_pages)
-			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {

Line 895... Line 886... (indentation-only differences not marked)

 	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_validate);
 
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-				struct ttm_placement *placement)
-{
-	BUG_ON((placement->fpfn || placement->lpfn) &&
-	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
-	return 0;
-}
-
 int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct ttm_buffer_object *bo,
 		unsigned long size,
 		enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment,
 		bool interruptible,
 		struct file *persistent_swap_storage,
 		size_t acc_size,
 		struct sg_table *sg,
+		struct reservation_object *resv,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
 	unsigned long num_pages;
 	bool locked;
 
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		pr_err("Illegal buffer object size\n");
 		if (destroy)
 			(*destroy)(bo);
 		else
 			kfree(bo);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;
 
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.size = num_pages << PAGE_SHIFT;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.io_reserved_vm = false;
 	bo->mem.bus.io_reserved_count = 0;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
-	bo->resv = &bo->ttm_resv;
-	reservation_object_init(bo->resv);
+	if (resv) {
+		bo->resv = resv;
+		lockdep_assert_held(&bo->resv->lock.base);
+	} else {
+		bo->resv = &bo->ttm_resv;
+		reservation_object_init(&bo->ttm_resv);
+	}
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
-	ret = ttm_bo_check_placement(bo, placement);
-
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (likely(!ret) &&
-	    (bo->type == ttm_bo_type_device ||
-	     bo->type == ttm_bo_type_sg))
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
 					 bo->mem.num_pages);
 
-	locked = ww_mutex_trylock(&bo->resv->lock);
-    WARN_ON(!locked);
+	/* passed reservation objects should already be locked,
+	 * since otherwise lockdep will be angered in radeon.
+	 */
+	if (!resv) {
+		locked = ww_mutex_trylock(&bo->resv->lock);
+		WARN_ON(!locked);
+	}
 
 	if (likely(!ret))
 		ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-	ttm_bo_unreserve(bo);
+	if (!resv)
+		ttm_bo_unreserve(bo);
 

Line 1019... Line 1010... (indentation-only changes)

 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			unsigned long p_size)
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
 
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
 	man->io_reserve_fastpath = true;
 	man->use_io_reserve_lru = false;
 	mutex_init(&man->io_reserve_mutex);
 	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
 		return ret;
 	man->bdev = bdev;
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
 		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
 	}
 	man->has_type = true;
 	man->use_type = true;
 	man->size = p_size;
 
 	INIT_LIST_HEAD(&man->lru);
 

Line 1064... Line 1055... (indentation-only changes)

     struct ttm_bo_global_ref *bo_ref =
         container_of(ref, struct ttm_bo_global_ref, ref);
     struct ttm_bo_global *glob = ref->object;
     int ret;
 
     mutex_init(&glob->device_list_mutex);
     spin_lock_init(&glob->lru_lock);
     glob->mem_glob = bo_ref->mem_glob;
 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 

Line 1116... Line 1107...

 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 	bdev->val_seq = 0;
-	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
 

Line 1169... Line 1159...

 }
 
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
-	struct ttm_bo_driver *driver = bo->bdev->driver;
-	struct ttm_bo_device *bdev = bo->bdev;
-	void *sync_obj;
-	int ret = 0;
+	struct reservation_object_list *fobj;
+	struct reservation_object *resv;
+	struct fence *excl;
+	long timeout = 15 * HZ;
+	int i;
 
-	if (likely(bo->sync_obj == NULL))
-		return 0;
-
-	while (bo->sync_obj) {
-
-		if (driver->sync_obj_signaled(bo->sync_obj)) {
-			void *tmp_obj = bo->sync_obj;
-			bo->sync_obj = NULL;
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bdev->fence_lock);
-			continue;
-		}
-
-		if (no_wait)
-			return -EBUSY;
-
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		spin_unlock(&bdev->fence_lock);
-		ret = driver->sync_obj_wait(sync_obj,
-					    lazy, interruptible);
-		if (unlikely(ret != 0)) {
-			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bdev->fence_lock);
-	return ret;
-		}
-		spin_lock(&bdev->fence_lock);
-		if (likely(bo->sync_obj == sync_obj)) {
-			void *tmp_obj = bo->sync_obj;
-			bo->sync_obj = NULL;
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
-				  &bo->priv_flags);
-			spin_unlock(&bdev->fence_lock);
-			driver->sync_obj_unref(&sync_obj);
-			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bdev->fence_lock);
-		} else {
-			spin_unlock(&bdev->fence_lock);
+	resv = bo->resv;
+	fobj = reservation_object_get_list(resv);
+	excl = reservation_object_get_excl(resv);
+	if (excl) {
+		if (!fence_is_signaled(excl)) {
+			if (no_wait)
+				return -EBUSY;
+
+			timeout = fence_wait_timeout(excl,
+						     interruptible, timeout);
+		}
+	}
+
+	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
+		struct fence *fence;
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(resv));
+
+		if (!fence_is_signaled(fence)) {
+			if (no_wait)
+				return -EBUSY;
+
+			timeout = fence_wait_timeout(fence,
+						     interruptible, timeout);
+		}
+	}
+
+	if (timeout < 0)
+		return timeout;
+
+	if (timeout == 0)
+			return -EBUSY;