/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */
#include 
#include 
#include 
#include "radeon_drm.h"
#include "radeon.h"
#include 
#include "radeon_object.h"

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
             int pages, u32_t *pagelist);

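/*
 * KolibriOS port note: instead of going through TTM, buffer placement is
 * handled with two global drm_mm range managers, one for GTT and one for
 * VRAM. Offsets handed out by these managers are in pages.
 */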
static struct drm_mm   mm_gtt;
static struct drm_mm   mm_vram;

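/*
 * Set up the VRAM and GTT heaps. The first 0xC00000 bytes (12 MiB) of VRAM
 * are kept out of the allocator, presumably reserved for the framebuffer.
 */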
int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    ENTER();

    r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
               ((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    };

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
 //   return radeon_ttm_init(rdev);
}

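/* Translate a RADEON_GEM_DOMAIN_* mask into TTM placement flags, falling
 * back to the system domain when nothing is requested. */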
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}

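/*
 * Allocate a buffer object and carve out a range from the VRAM or GTT heap,
 * depending on the requested domain. Only the bookkeeping structure and the
 * drm_mm node are set up here; GTT pages are bound later, when the object
 * is pinned.
 */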
int radeon_bo_create(struct radeon_device *rdev,
        unsigned long size, int byte_align, bool kernel, u32 domain,
             struct radeon_bo **bo_ptr)
{
    struct radeon_bo *bo;
    enum ttm_bo_type type;
    uint32_t flags;
    int r = 0;

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (bo == NULL) {
        return -ENOMEM;
    }
    bo->rdev = rdev;
    INIT_LIST_HEAD(&bo->list);

    flags = radeon_object_flags_from_domain(domain);

    bo->flags = flags;

    if( flags & TTM_PL_FLAG_VRAM)
    {
        size_t num_pages;

        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get:
        r = drm_mm_pre_get(&mm_vram);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        bo->mm_node =  drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(bo->mm_node == NULL)) {
            goto retry_pre_get;
        }

        bo->vm_addr = ((uint32_t)bo->mm_node->start);

//        dbgprintf("alloc vram: base %x size %x\n",
//                   robj->vm_addr << PAGE_SHIFT, num_pages  << PAGE_SHIFT);

    };

    if( flags & TTM_PL_FLAG_TT)
    {
        size_t num_pages;

        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get1:
        r = drm_mm_pre_get(&mm_gtt);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        bo->mm_node =  drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(bo->mm_node == NULL)) {
            goto retry_pre_get1;
        }

        bo->vm_addr = ((uint32_t)bo->mm_node->start);

//        dbgprintf("alloc gtt: base %x size %x\n",
//                   robj->vm_addr << PAGE_SHIFT, num_pages  << PAGE_SHIFT);
    };

//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
              size, flags, 0);
        return r;
    }
    *bo_ptr = bo;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
    return 0;
}

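/* Window where the kernel page tables are visible; used below to look up the
 * physical pages backing a KernelAlloc()'ed buffer. The value is assumed to
 * correspond to the kernel's page-table self-mapping. */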
#define page_tabs  0xFDC00000

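/*
 * Pin a buffer at its allocated offset and return the GPU address.
 * For VRAM objects the address is simply offset by the VRAM location;
 * for GTT objects a kernel buffer is allocated and its pages are bound
 * into the GART before the GTT location is added.
 */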
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
              uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

//    flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);
//    r = radeon_object_reserve(robj, false);
//    if (unlikely(r != 0)) {
//        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//        return r;
//    }
//    tmp = robj->tobj.mem.placement;
//    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//    r = ttm_buffer_object_validate(&robj->tobj,
//                       robj->tobj.proposed_placement,
//                       false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if(robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT)
    {
        u32_t *pagelist;
        robj->kptr  = KernelAlloc( robj->mm_node->size << PAGE_SHIFT );
        dbgprintf("kernel alloc %x\n", robj->kptr );

        pagelist =  &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size,  pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    }
    else
    {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    };

//    flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    return r;
}

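/*
 * Map a buffer into kernel address space. VRAM objects are mapped through
 * the PCI aperture with MapIoMem(); GTT objects already get a kernel
 * mapping in radeon_object_pin(), so the early return on robj->kptr
 * covers them.
 */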
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r = 0;

//   spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);

    if(robj->flags & TTM_PL_FLAG_VRAM)
    {
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                           robj->mm_node->size << 12, PG_SW);
    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    return 0;
}

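/* Release the kernel mapping of a VRAM object; GTT mappings are left alone. */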
void radeon_object_kunmap(struct radeon_object *robj)
{
//   spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        FreeKernelSpace(robj->kptr);
        robj->kptr = NULL;
    }
//   spin_unlock(&robj->tobj.lock);
}

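/*
 * Drop a pin reference. When the last reference goes away, the drm_mm block
 * is returned to its heap and the object itself is freed.
 */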
void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//   spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }
//   spin_unlock(&robj->tobj.lock);

    drm_mm_put_block(robj->mm_node);

    kfree(robj);
}

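/*
 * The original TTM-based implementation is kept below for reference but is
 * compiled out in this port.
 */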
#if 0

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
	struct radeon_object *robj;

	robj = container_of(tobj, struct radeon_object, tobj);
//   list_del_init(&robj->list);
	kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}


int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr)
{
	struct radeon_object *robj;
	enum ttm_bo_type type;
	uint32_t flags;
	int r;

//   if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//       rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//   }
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*robj_ptr = NULL;
	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (robj == NULL) {
		return -ENOMEM;
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
//   INIT_LIST_HEAD(&robj->list);

	flags = radeon_object_flags_from_domain(domain);
//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
	if (unlikely(r != 0)) {
		/* ttm call radeon_ttm_object_object_destroy if error happen */
		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
	return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

//   spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
//       spin_unlock(&robj->tobj.lock);
		return 0;
	}
//   spin_unlock(&robj->tobj.lock);
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
//   spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//   spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//   spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
//       spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
//   spin_unlock(&robj->tobj.lock);
	ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
	struct ttm_buffer_object *tobj;

	if ((*robj) == NULL) {
		return;
	}
	tobj = &((*robj)->tobj);
	ttm_bo_unref(&tobj);
	if (tobj == NULL) {
		*robj = NULL;
	}
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	int r;

	flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
//       spin_unlock(&robj->tobj.lock);
		return 0;
	}
//   spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
		return r;
	}
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	radeon_object_gpu_addr(robj);
	if (gpu_addr != NULL) {
		*gpu_addr = robj->gpu_addr;
	}
	robj->pin_count = 1;
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
	}
	radeon_object_unreserve(robj);
	return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	int r;

//   spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
		return;
	}
	robj->pin_count--;
	if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
		return;
	}
//   spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
		return;
	}
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
//   spin_lock(&robj->tobj.lock);
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, false, false);
	}
//   spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

597
 
598
int radeon_object_evict_vram(struct radeon_device *rdev)
599
{
600
	if (rdev->flags & RADEON_IS_IGP) {
601
		/* Useless to evict on IGP chips */
602
		return 0;
603
	}
604
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
605
}
606
 
607
void radeon_object_force_delete(struct radeon_device *rdev)
608
{
609
	struct radeon_object *robj, *n;
610
	struct drm_gem_object *gobj;
611
 
612
	if (list_empty(&rdev->gem.objects)) {
613
		return;
614
	}
615
	DRM_ERROR("Userspace still has active objects !\n");
616
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
617
		mutex_lock(&rdev->ddev->struct_mutex);
618
		gobj = robj->gobj;
619
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
620
			  gobj, robj, (unsigned long)gobj->size,
621
			  *((unsigned long *)&gobj->refcount));
622
		list_del_init(&robj->list);
623
		radeon_object_unref(&robj);
624
		gobj->driver_private = NULL;
625
		drm_gem_object_unreference(gobj);
626
		mutex_unlock(&rdev->ddev->struct_mutex);
627
	}
628
}
629
 
630
void radeon_object_fini(struct radeon_device *rdev)
631
{
632
	radeon_ttm_fini(rdev);
633
}
634
 
635
void radeon_object_list_add_object(struct radeon_object_list *lobj,
636
				   struct list_head *head)
637
{
638
	if (lobj->wdomain) {
639
		list_add(&lobj->list, head);
640
	} else {
641
		list_add_tail(&lobj->list, head);
642
	}
643
}
644
 
645
int radeon_object_list_reserve(struct list_head *head)
646
{
647
	struct radeon_object_list *lobj;
648
	struct list_head *i;
649
	int r;
650
 
651
	list_for_each(i, head) {
652
		lobj = list_entry(i, struct radeon_object_list, list);
653
		if (!lobj->robj->pin_count) {
654
			r = radeon_object_reserve(lobj->robj, true);
655
			if (unlikely(r != 0)) {
656
				DRM_ERROR("radeon: failed to reserve object.\n");
657
				return r;
658
			}
659
		} else {
660
		}
661
	}
662
	return 0;
663
}
664
 
665
void radeon_object_list_unreserve(struct list_head *head)
666
{
667
	struct radeon_object_list *lobj;
668
	struct list_head *i;
669
 
670
	list_for_each(i, head) {
671
		lobj = list_entry(i, struct radeon_object_list, list);
672
		if (!lobj->robj->pin_count) {
673
			radeon_object_unreserve(lobj->robj);
674
		} else {
675
		}
676
	}
677
}
678
 
679
int radeon_object_list_validate(struct list_head *head, void *fence)
680
{
681
	struct radeon_object_list *lobj;
682
	struct radeon_object *robj;
683
	struct radeon_fence *old_fence = NULL;
684
	struct list_head *i;
685
	uint32_t flags;
686
	int r;
687
 
688
	r = radeon_object_list_reserve(head);
689
	if (unlikely(r != 0)) {
690
		radeon_object_list_unreserve(head);
691
		return r;
692
	}
693
	list_for_each(i, head) {
694
		lobj = list_entry(i, struct radeon_object_list, list);
695
		robj = lobj->robj;
696
		if (lobj->wdomain) {
697
			flags = radeon_object_flags_from_domain(lobj->wdomain);
698
			flags |= TTM_PL_FLAG_TT;
699
		} else {
700
			flags = radeon_object_flags_from_domain(lobj->rdomain);
701
			flags |= TTM_PL_FLAG_TT;
702
			flags |= TTM_PL_FLAG_VRAM;
703
		}
704
		if (!robj->pin_count) {
705
			robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
706
			r = ttm_buffer_object_validate(&robj->tobj,
707
						       robj->tobj.proposed_placement,
708
						       true, false);
709
			if (unlikely(r)) {
710
				radeon_object_list_unreserve(head);
711
				DRM_ERROR("radeon: failed to validate.\n");
712
				return r;
713
			}
714
			radeon_object_gpu_addr(robj);
715
		}
716
		lobj->gpu_offset = robj->gpu_addr;
717
		if (fence) {
718
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
719
			robj->tobj.sync_obj = radeon_fence_ref(fence);
720
			robj->tobj.sync_obj_arg = NULL;
721
		}
722
		if (old_fence) {
723
			radeon_fence_unref(&old_fence);
724
		}
725
	}
726
	return 0;
727
}
728
 
729
void radeon_object_list_unvalidate(struct list_head *head)
730
{
731
	struct radeon_object_list *lobj;
732
	struct radeon_fence *old_fence = NULL;
733
	struct list_head *i;
734
 
735
	list_for_each(i, head) {
736
		lobj = list_entry(i, struct radeon_object_list, list);
737
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
738
		lobj->robj->tobj.sync_obj = NULL;
739
		if (old_fence) {
740
			radeon_fence_unref(&old_fence);
741
		}
742
	}
743
	radeon_object_list_unreserve(head);
744
}
745
 
746
void radeon_object_list_clean(struct list_head *head)
747
{
748
	radeon_object_list_unreserve(head);
749
}
750
 
751
int radeon_object_fbdev_mmap(struct radeon_object *robj,
752
			     struct vm_area_struct *vma)
753
{
754
	return ttm_fbdev_mmap(vma, &robj->tobj);
755
}
756
 
1128 serge 757
#endif
758
 
1120 serge 759
unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}