Subversion Repositories Kolibri OS


Rev 4569 → Rev 5078 (unified: '-' = removed in 5078, '+' = added in 5078, ' ' = unchanged)
Line 37... Line 37...
 #include 
 #include 
 #include 
 #include 
 
+#define pr_err(fmt, ...) \
+        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
-#define TTM_BO_HASH_ORDER 13
-
-#define pr_err(fmt, ...) \
-        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
-{
-
-    mutex_lock(&man->io_reserve_mutex);
-    return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
-    if (likely(man->io_reserve_fastpath))
-        return;
-
-    mutex_unlock(&man->io_reserve_mutex);
-}
-
-
-#if 0
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
-{
-    struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-
-    pr_err("    has_type: %d\n", man->has_type);
-    pr_err("    use_type: %d\n", man->use_type);
-    pr_err("    flags: 0x%08X\n", man->flags);
-    pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
-    pr_err("    size: %llu\n", man->size);
-    pr_err("    available_caching: 0x%08X\n", man->available_caching);
-    pr_err("    default_caching: 0x%08X\n", man->default_caching);
-    if (mem_type != TTM_PL_SYSTEM)
-        (*man->func->debug)(man, TTM_PFX);
-}
-
-static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
-                    struct ttm_placement *placement)
-{
-    int i, ret, mem_type;
-
-    pr_err("No space for %p (%lu pages, %luK, %luM)\n",
-           bo, bo->mem.num_pages, bo->mem.size >> 10,
-           bo->mem.size >> 20);
-    for (i = 0; i < placement->num_placement; i++) {
-        ret = ttm_mem_type_from_flags(placement->placement[i],
-                        &mem_type);
-        if (ret)
-            return;
-        pr_err("  placement[%d]=0x%08X (%d)\n",
-               i, placement->placement[i], mem_type);
-        ttm_mem_type_debug(bo->bdev, mem_type);
-    }
-}
-
-static ssize_t ttm_bo_global_show(struct kobject *kobj,
-                  struct attribute *attr,
-                  char *buffer)
-{
-    struct ttm_bo_global *glob =
-        container_of(kobj, struct ttm_bo_global, kobj);
-
-    return snprintf(buffer, PAGE_SIZE, "%lu\n",
-            (unsigned long) atomic_read(&glob->bo_count));
-}
-
-static struct attribute *ttm_bo_global_attrs[] = {
-    &ttm_bo_count,
-    NULL
-};
-
-static const struct sysfs_ops ttm_bo_global_ops = {
-    .show = &ttm_bo_global_show
+#define TTM_BO_HASH_ORDER 13
+
+
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
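Note: the new ttm_mem_type_from_flags() helper works because each TTM placement flag is defined as the bit of its memory-type index, so scanning bits 0..TTM_PL_PRIV5 recovers the index from a flag word. A minimal caller sketch (names from the TTM placement headers):

    uint32_t mem_type;
    int ret = ttm_mem_type_from_flags(TTM_PL_FLAG_VRAM, &mem_type);
    /* ret == 0 and mem_type == TTM_PL_VRAM, since
     * TTM_PL_FLAG_VRAM is defined as 1 << TTM_PL_VRAM */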
Line 146... Line 89...
 	BUG_ON(!list_empty(&bo->ddestroy));
 
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
+	if (bo->resv == &bo->ttm_resv)
+		reservation_object_fini(&bo->ttm_resv);
+	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
 		kfree(bo);
 	}
-	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
 
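Note: the destructor now finalizes the reservation object and the new wu_mutex ("wait unreserved" mutex) before freeing the BO, and the per-BO memory-accounting release (ttm_mem_global_free) is gone from this path. The ownership guard matters because bo->resv is a pointer that can in principle be redirected to an external reservation object; only the embedded one is the BO's to destroy:

    /* teardown order in the release path after this change */
    if (bo->resv == &bo->ttm_resv)               /* embedded, so owned */
        reservation_object_fini(&bo->ttm_resv);
    mutex_destroy(&bo->wu_mutex);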
Line 266... Line 211...
 	}
 
 	return ret;
 }
 
-#if 0
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
 				  bool evict, bool interruptible,
Line 346... Line 290...
 		goto out_err;
 	}
 
 moved:
 	if (bo->evicted) {
+		if (bdev->driver->invalidate_caches) {
 		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
 		if (ret)
 			pr_err("Can not flush read caches\n");
+		}
 		bo->evicted = false;
 	}
 
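Note: invalidate_caches is now called only when the driver actually implements it, so a port driver may leave the hook NULL. A hedged sketch of what that permits (my_driver is illustrative, not from this file):

    static struct ttm_bo_driver my_driver = {
        /* ...other hooks... */
        .invalidate_caches = NULL,  /* legal now: the call site checks first */
    };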
Line 405... Line 351...
 	void *sync_obj = NULL;
 	int put_count;
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
Line 436... Line 382...
 		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
 			ttm_bo_add_to_lru(bo);
 		}
 
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj);
 		driver->sync_obj_unref(&sync_obj);
 	}
-	schedule_delayed_work(&bdev->wq,
-			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+//	schedule_delayed_work(&bdev->wq,
+//			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
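Note: rescheduling of the delayed-destroy work is commented out here for the KolibriOS port (the port appears to drive ttm_bo_delayed_delete() by other means). The delay expression itself just clamps the period to at least one jiffy, i.e.:

    /* HZ/100 integer-divides to 0 when HZ < 100; never schedule 0 jiffies */
    unsigned long delay = (HZ / 100 < 1) ? 1 : HZ / 100;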
Line 487... Line 433...
 		 * no new sync objects can be attached.
 		 */
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
 
 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
Line 507... Line 453...
 		spin_unlock(&bdev->fence_lock);
 		if (ret)
 			return ret;
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
 		/*
 		 * We raced, and lost, someone else holds the reservation now,
Line 525... Line 471...
 		}
 	} else
 		spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
Line 570... Line 516...
 			nentry = list_first_entry(&entry->ddestroy,
 				struct ttm_buffer_object, ddestroy);
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
 		if (remove_all && ret) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_bo_reserve_nolru(entry, false, false,
-						   false, 0);
+			ret = __ttm_bo_reserve(entry, false, false,
+					       false, NULL);
 			spin_lock(&glob->lru_lock);
 		}
 
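Note: every ttm_bo_reserve_nolru() call in this file becomes __ttm_bo_reserve(), following the upstream rename; the final argument changes from a uint32_t sequence number to a struct ww_acquire_ctx * (NULL everywhere here), and the matching bare ww_mutex_unlock(&bo->resv->lock) calls become __ttm_bo_unreserve(bo). The convention as used in these hunks (parameter names are my reading of the call sites, not spelled out in this diff):

    /* interruptible = false, no_wait = true, use_ticket = false, no ticket */
    ret = __ttm_bo_reserve(bo, false, true, false, NULL);
    if (ret == 0)
        __ttm_bo_unreserve(bo);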
Line 613... Line 559...
 	if (ttm_bo_delayed_delete(bdev, false)) {
 		schedule_delayed_work(&bdev->wq,
 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 	}
 }
-#endif
 
 static void ttm_bo_release(struct kref *kref)
 {
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 	ttm_mem_io_lock(man, false);
-//   ttm_mem_io_free_vm(bo);
+	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
-//   ttm_bo_cleanup_refs_or_queue(bo);
-//   kref_put(&bo->list_kref, ttm_bo_release_list);
+	ttm_bo_cleanup_refs_or_queue(bo);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
 }
 
Line 639... Line 584...
 	*p_bo = NULL;
 	kref_put(&bo->kref, ttm_bo_release);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
-
-#if 0
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
-{
-	return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
-{
-	if (resched)
-		schedule_delayed_work(&bdev->wq,
-				      ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-
-static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait_gpu)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_reg evict_mem;
-	struct ttm_placement placement;
-	int ret = 0;
-
-	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bdev->fence_lock);
-
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS) {
-			pr_err("Failed to expire sync object before buffer eviction\n");
-		}
-		goto out;
-	}
-
-//	BUG_ON(!ttm_bo_is_reserved(bo));
-
-	evict_mem = bo->mem;
-	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
-
-	placement.fpfn = 0;
-	placement.lpfn = 0;
-	placement.num_placement = 0;
-	placement.num_busy_placement = 0;
-	bdev->driver->evict_flags(bo, &placement);
-	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait_gpu);
-	if (ret) {
-		if (ret != -ERESTARTSYS) {
-			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
-			       bo);
-			ttm_bo_mem_space_debug(bo, &placement);
-		}
-		goto out;
-	}
-
-	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait_gpu);
-	if (ret) {
-		if (ret != -ERESTARTSYS)
-			pr_err("Buffer eviction failed\n");
-		ttm_bo_mem_put(bo, &evict_mem);
-		goto out;
-	}
-	bo->evicted = true;
-out:
-	return ret;
-}
-
-static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
-				uint32_t mem_type,
-				bool interruptible,
-				bool no_wait_gpu)
-{
-	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct ttm_buffer_object *bo;
-	int ret = -EBUSY, put_count;
-
-	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
-		if (!ret)
-			break;
-	}
-
-	if (ret) {
-		spin_unlock(&glob->lru_lock);
-		return ret;
-	}
-
-	kref_get(&bo->list_kref);
-
-	if (!list_empty(&bo->ddestroy)) {
-		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
-						     no_wait_gpu);
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-		return ret;
-	}
-
-	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	BUG_ON(ret != 0);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
-
-	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
-	ttm_bo_unreserve(bo);
-
-	kref_put(&bo->list_kref, ttm_bo_release_list);
-	return ret;
-}
-
 
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
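Note: ttm_bo_evict() and ttm_mem_evict_first() were dead code in rev 4569 (fenced by #if 0) and are dropped outright here; their call sites below stay commented out, so this port cannot yet evict buffers to satisfy an allocation. For reference, the victim-selection core of the removed ttm_mem_evict_first() was the usual TTM pattern: walk the manager's LRU under the global lru_lock and take the first BO whose reservation succeeds without blocking:

    spin_lock(&glob->lru_lock);
    list_for_each_entry(bo, &man->lru, lru) {
        ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
        if (!ret)
            break;              /* found a reservable victim */
    }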
Line 779... Line 609...
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, mem);
+		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
 			break;
-		ret = ttm_mem_evict_first(bdev, mem_type,
-					  interruptible, no_wait_gpu);
-		if (unlikely(ret != 0))
-			return ret;
+//		ret = ttm_mem_evict_first(bdev, mem_type,
+//					  interruptible, no_wait_gpu);
+//		if (unlikely(ret != 0))
+//			return ret;
 	} while (1);
 	if (mem->mm_node == NULL)
 		return -ENOMEM;
 	mem->mem_type = mem_type;
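Note: the memory-manager hook grows a flags argument: get_node is now called as (man, bo, placement, flags, mem), with 0 passed here and cur_flags passed from ttm_bo_mem_space() below, so a manager can take the already-decided placement/caching flags into account. Assumed shape of the callback after this change (reconstructed from the two call sites, not shown in this diff):

    int (*get_node)(struct ttm_mem_type_manager *man,
                    struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    uint32_t flags,
                    struct ttm_mem_reg *mem);

Also worth noting: with the ttm_mem_evict_first() call commented out, this do/while retries get_node with nothing changed in between, so on a genuinely full manager it would spin rather than return -ENOMEM.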
Line 892... Line 722...
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement, mem);
+			ret = (*man->func->get_node)(man, bo, placement,
+						     cur_flags, mem);
 			if (unlikely(ret))
 				return ret;
 		}
 		if (mem->mm_node)
Line 932... Line 763...
 		 * the memory placement flags to the current flags
 		 */
 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);
-
 
 		if (mem_type == TTM_PL_SYSTEM) {
 			mem->mem_type = mem_type;
 			mem->placement = cur_flags;
 			mem->mm_node = NULL;
Line 963... Line 793...
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
 	struct ttm_bo_device *bdev = bo->bdev;
 
-//	BUG_ON(!ttm_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->resv->lock.base);
 
 	/*
 	 * FIXME: It's possible to pipeline buffer moves.
Line 994... Line 824...
 out_unlock:
 	if (ret && mem.mm_node)
 		ttm_bo_mem_put(bo, &mem);
 	return ret;
 }
-#endif
 
 static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 			      struct ttm_mem_reg *mem,
 			      uint32_t *new_flags)
Line 1032... Line 861...
 			bool no_wait_gpu)
 {
 	int ret;
 	uint32_t new_flags;
 
-//	BUG_ON(!ttm_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
 	if (placement->lpfn || placement->fpfn)
 		if (placement->fpfn > placement->lpfn ||
 			(placement->lpfn - placement->fpfn) < bo->num_pages)
 			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
-//       ret = ttm_bo_move_buffer(bo, placement, interruptible,
-//                    no_wait_gpu);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible,
+					 no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
 		/*
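Note: both commented-out BUG_ON(!ttm_bo_is_reserved(bo)) checks become lockdep_assert_held() on the base mutex inside the reservation ww_mutex. Unlike the BUG_ON, this costs nothing in normal builds and gives a precise report under CONFIG_LOCKDEP. The idiom, in isolation (hypothetical wrapper for illustration):

    static void assert_bo_reserved(struct ttm_buffer_object *bo)
    {
        /* compiles away unless lockdep is enabled; then it checks that
         * the current task holds bo->resv->lock */
        lockdep_assert_held(&bo->resv->lock.base);
    }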
Line 1089... Line 918...
 		struct sg_table *sg,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
 	unsigned long num_pages;
-	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 	bool locked;
 
-//   ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
-	if (ret) {
-		pr_err("Out of kernel memory\n");
-		if (destroy)
-			(*destroy)(bo);
-		else
-			kfree(bo);
-		return -ENOMEM;
-	}
-
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		pr_err("Illegal buffer object size\n");
 		if (destroy)
 			(*destroy)(bo);
 		else
 			kfree(bo);
-//       ttm_mem_global_free(mem_glob, acc_size);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;
Line 1139... Line 956...
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
 	bo->resv = &bo->ttm_resv;
-//   reservation_object_init(bo->resv);
+	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
 	ret = ttm_bo_check_placement(bo, placement);
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-//   if (likely(!ret) &&
-//       (bo->type == ttm_bo_type_device ||
-//        bo->type == ttm_bo_type_sg))
-//       ret = ttm_bo_setup_vm(bo);
+	if (likely(!ret) &&
+	    (bo->type == ttm_bo_type_device ||
+	     bo->type == ttm_bo_type_sg))
+		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+					 bo->mem.num_pages);
 
-//   if (likely(!ret))
-//   ret = ttm_bo_validate(bo, placement, interruptible, false);
+	locked = ww_mutex_trylock(&bo->resv->lock);
+	WARN_ON(!locked);
+
+	if (likely(!ret))
+		ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-//   ttm_bo_unreserve(bo);
+	ttm_bo_unreserve(bo);
 
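Note: ttm_bo_init() now actually reserves the freshly built BO before validating it. Because the object is not yet visible to anyone else, ww_mutex_trylock() on its just-initialized reservation lock cannot legitimately fail, hence the bare WARN_ON rather than an error path; ttm_bo_unreserve() then drops the reservation after validation:

    bool locked = ww_mutex_trylock(&bo->resv->lock);
    WARN_ON(!locked);          /* fresh, unshared object: no contention */
    if (likely(!ret))
        ret = ttm_bo_validate(bo, placement, interruptible, false);
    ttm_bo_unreserve(bo);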
Line 1195... Line 1016...
 	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
 	return size;
 }
 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-
-int ttm_bo_create(struct ttm_bo_device *bdev,
-			unsigned long size,
-			enum ttm_bo_type type,
-			struct ttm_placement *placement,
-			uint32_t page_alignment,
-			bool interruptible,
-			struct file *persistent_swap_storage,
-			struct ttm_buffer_object **p_bo)
-{
-	struct ttm_buffer_object *bo;
-	size_t acc_size;
-	int ret;
-
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (unlikely(bo == NULL))
-		return -ENOMEM;
-
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
-			  interruptible, persistent_swap_storage, acc_size,
-			  NULL, NULL);
-	if (likely(ret == 0))
-		*p_bo = bo;
-
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_create);
-
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			unsigned long p_size)
 {
     int ret = -EINVAL;
     struct ttm_mem_type_manager *man;
-
-    ENTER();
 
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
     man = &bdev->man[type];
 	BUG_ON(man->has_type);
Line 1275... Line 1047...
     man->use_type = true;
     man->size = p_size;
 
     INIT_LIST_HEAD(&man->lru);
-
-    LEAVE();
 
     return 0;
 }
-
-
+EXPORT_SYMBOL(ttm_bo_init_mm);
 void ttm_bo_global_release(struct drm_global_reference *ref)
Line 1287... Line 1056...
 {
Line 1295... Line 1064...
     struct ttm_bo_global_ref *bo_ref =
         container_of(ref, struct ttm_bo_global_ref, ref);
     struct ttm_bo_global *glob = ref->object;
     int ret;
 
-    ENTER();
-
 	mutex_init(&glob->device_list_mutex);
 	spin_lock_init(&glob->lru_lock);
     glob->mem_glob = bo_ref->mem_glob;
-    glob->dummy_read_page = AllocPage();
+	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
     if (unlikely(glob->dummy_read_page == NULL)) {
         ret = -ENOMEM;
Line 1312... Line 1079...
     INIT_LIST_HEAD(&glob->swap_lru);
     INIT_LIST_HEAD(&glob->device_list);
 
     atomic_set(&glob->bo_count, 0);
-
-    LEAVE();
 
     return 0;
 
 out_no_drp:
     kfree(glob);
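Note: the dummy read page (the page TTM maps for CPU reads of never-populated buffer pages) moves from the KolibriOS-native AllocPage() to the Linux-style alloc_page(__GFP_ZERO | GFP_DMA32), presumably via the port's Linux-compat layer; __GFP_ZERO guarantees readers see zeros and GFP_DMA32 keeps the page 32-bit addressable. Error handling is unchanged:

    glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
    if (glob->dummy_read_page == NULL)
        return -ENOMEM;   /* via the out_no_drp path above */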
     return ret;
 }
 EXPORT_SYMBOL(ttm_bo_global_init);
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_global *glob,
 		       struct ttm_bo_driver *driver,
+		       struct address_space *mapping,
 		       uint64_t file_page_offset,
 		       bool need_dma32)
 {
 	int ret = -EINVAL;
 
Line 1347... Line 1110...
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
 	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
 				    0x10000000);
-//	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
+	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
-	bdev->dev_mapping = NULL;
+	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 	bdev->val_seq = 0;
 	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
-
-    LEAVE();
 
 	return 0;
 out_no_sys:
 	return ret;
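Note: ttm_bo_device_init() gains a struct address_space *mapping parameter and stores it in bdev->dev_mapping (previously hard-coded NULL), so unmapping of CPU views can actually find the mapping; the delayed-destroy work is also wired up again. A hedged call-site sketch in the style of contemporary upstream drivers (names like priv, my_bo_driver and the anon-inode path are illustrative, not from this file):

    ret = ttm_bo_device_init(&priv->bdev, glob, &my_bo_driver,
                             dev->anon_inode->i_mapping,
                             DRM_FILE_PAGE_OFFSET, need_dma32);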
Line 1387... Line 1148...
 			return false;
 	}
 	return true;
 }
 
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
+	ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
+
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);
+
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
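Note: these two helpers come back in the port because dev_mapping is now real (see ttm_bo_device_init above). The split exists for lock ordering: ttm_bo_unmap_virtual_locked() requires the manager's io_reserve lock already held, while the plain wrapper takes and releases it. Typical use after moving a buffer:

    /* invalidate any CPU page-table entries still pointing at the old place */
    ttm_bo_unmap_virtual(bo);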
Line 1398... Line 1181...
 	int ret = 0;
 
 	if (likely(bo->sync_obj == NULL))
 		return 0;
 
-	return 0;
-}
-EXPORT_SYMBOL(ttm_bo_wait);
-
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	int ret = 0;
-
-	/*
-	 * Using ttm_bo_reserve makes sure the lru lists are updated.
-	 */
-
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
+	while (bo->sync_obj) {
+
+		if (driver->sync_obj_signaled(bo->sync_obj)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
+			continue;
+		}
+
+		if (no_wait)
+			return -EBUSY;
+
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		spin_unlock(&bdev->fence_lock);
+		ret = driver->sync_obj_wait(sync_obj,
+					    lazy, interruptible);
+		if (unlikely(ret != 0)) {
+			driver->sync_obj_unref(&sync_obj);
+			spin_lock(&bdev->fence_lock);
+			return ret;
+		}
+		spin_lock(&bdev->fence_lock);
+		if (likely(bo->sync_obj == sync_obj)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+				  &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
+		} else {
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			spin_lock(&bdev->fence_lock);
+		}
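Note: this restores the real fence-wait loop that the old port had stubbed out to return 0 immediately (and it drops the empty ttm_bo_synccpu_write_grab() stub; sync_obj here is a local declared in context this diff does not show). The loop's discipline is that bdev->fence_lock is never held across anything that can sleep or drop a reference, and after re-acquiring the lock bo->sync_obj is re-checked because it may have been replaced while the lock was down:

    sync_obj = driver->sync_obj_ref(bo->sync_obj);   /* pin before unlocking */
    spin_unlock(&bdev->fence_lock);                  /* never sleep under it */
    ret = driver->sync_obj_wait(sync_obj, lazy, interruptible);
    spin_lock(&bdev->fence_lock);
    if (bo->sync_obj == sync_obj) {
        /* still the fence we waited on: safe to detach it */
    }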