/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */
#include
#include
#include "radeon_drm.h"
#include "radeon.h"
#include
#include "radeon_object.h"

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
             int pages, u32_t *pagelist);

static struct drm_mm   mm_gtt;
static struct drm_mm   mm_vram;
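
/*
 * Set up the VRAM and GTT range managers.  The VRAM heap starts 8 MiB
 * (0x800000) into the aperture and covers the rest of it; the GTT heap
 * covers the whole GTT size.
 */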
int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

    r = drm_mm_init(&mm_vram, 0x800000 >> PAGE_SHIFT,
               ((rdev->mc.aper_size - 0x800000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
//   return radeon_ttm_init(rdev);
}
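
/* Translate RADEON_GEM_DOMAIN_* flags into TTM placement flags,
 * falling back to TTM_PL_FLAG_SYSTEM when no domain is requested. */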
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
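
/*
 * Allocate a radeon_object of 'size' bytes for the requested domain.
 * For VRAM or GTT placements the page range is carved out of the
 * corresponding static drm_mm heap and its page offset is stored in
 * vm_addr; no TTM buffer object is created on this path.
 */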
int radeon_object_create(struct radeon_device *rdev,
             struct drm_gem_object *gobj,
             unsigned long size,
             bool kernel,
             uint32_t domain,
             bool interruptible,
             struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
//    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);

    robj->flags = flags;

    dbgprintf("robj flags %x\n", robj->flags);

    if (flags & TTM_PL_FLAG_VRAM)
    {
        size_t num_pages;
        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            printk("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get:
        r = drm_mm_pre_get(&mm_vram);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

        dbgprintf("alloc vram: base %x size %x\n",
                   robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

    if (flags & TTM_PL_FLAG_TT)
    {
        size_t num_pages;
        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            printk("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get1:
        r = drm_mm_pre_get(&mm_gtt);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get1;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

        dbgprintf("alloc gtt: base %x size %x\n",
                   robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
              size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
    return 0;
}
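
/*
 * page_tabs appears to be the virtual base of the page-table window:
 * indexing it by a virtual page number (address >> 12) yields the page
 * table entries used below to build the GART page list.
 */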
#define page_tabs  0xFDC00000
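
/*
 * Pin the object and return its GPU address.  VRAM objects resolve to
 * vram_location plus the heap offset; GTT objects get backing pages from
 * KernelAlloc(), which are bound into the GART via radeon_gart_bind()
 * before gtt_location is added.
 */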
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
              uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

//    flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);
//    r = radeon_object_reserve(robj, false);
//    if (unlikely(r != 0)) {
//        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//        return r;
//    }
//    tmp = robj->tobj.mem.placement;
//    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//    r = ttm_buffer_object_validate(&robj->tobj,
//                       robj->tobj.proposed_placement,
//                       false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if (robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT)
    {
        u32_t *pagelist;
        robj->kptr = KernelAlloc(robj->mm_node->size << PAGE_SHIFT);
        dbgprintf("kernel alloc %x\n", robj->kptr);

        pagelist = &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size, pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    }
    else
    {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    }

//    flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    dbgprintf("done %s\n",__FUNCTION__);

    return r;
}
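
/*
 * Map the object for CPU access.  Only VRAM objects are handled here:
 * the aperture region backing the object is mapped with MapIoMem().
 * GTT objects already have kptr set when they are pinned, so they take
 * the early-return path above.
 */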
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

//   spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                           robj->mm_node->size << 12, PG_SW);
        dbgprintf("map io mem %x at %x\n", robj->cpu_addr, robj->kptr);
    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    dbgprintf("done %s\n",__FUNCTION__);

    return 0;
}
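
/*
 * The block below, up to the matching #endif, contains the TTM-based
 * variants of these helpers; it is compiled out in this port.
 */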
#if 0

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//   spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }
//   spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                       robj->tobj.proposed_placement,
                       false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
    return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
    ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
    struct radeon_object *robj;

    robj = container_of(tobj, struct radeon_object, tobj);
//   list_del_init(&robj->list);
    kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
    /* Default gpu address */
    robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
    if (robj->tobj.mem.mm_node == NULL) {
        return;
    }
    robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
    switch (robj->tobj.mem.mem_type) {
    case TTM_PL_VRAM:
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        return;
    }
}

int radeon_object_create(struct radeon_device *rdev,
             struct drm_gem_object *gobj,
             unsigned long size,
             bool kernel,
             uint32_t domain,
             bool interruptible,
             struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r;

//   if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//       rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//   }
    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
    robj->gobj = gobj;
//   INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);
//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
              size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
    return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r;

//   spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);
    r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
    if (r) {
        return r;
    }
//   spin_lock(&robj->tobj.lock);
    robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//   spin_unlock(&robj->tobj.lock);
    if (ptr) {
        *ptr = robj->kptr;
    }
    return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//   spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }
    robj->kptr = NULL;
//   spin_unlock(&robj->tobj.lock);
    ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
    struct ttm_buffer_object *tobj;

    if ((*robj) == NULL) {
        return;
    }
    tobj = &((*robj)->tobj);
    ttm_bo_unref(&tobj);
    if (tobj == NULL) {
        *robj = NULL;
    }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
    *offset = robj->tobj.addr_space_offset;
    return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
              uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r;

    flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
        return r;
    }
    tmp = robj->tobj.mem.placement;
    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
    r = ttm_buffer_object_validate(&robj->tobj,
                       robj->tobj.proposed_placement,
                       false, false);
    radeon_object_gpu_addr(robj);
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }
    radeon_object_unreserve(robj);
    return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//   spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }
//   spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                       robj->tobj.proposed_placement,
                       false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
    int r = 0;

    /* FIXME: should use block reservation instead */
    r = radeon_object_reserve(robj, true);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for waiting.\n");
        return r;
    }
//   spin_lock(&robj->tobj.lock);
    if (robj->tobj.sync_obj) {
        r = ttm_bo_wait(&robj->tobj, true, false, false);
    }
//   spin_unlock(&robj->tobj.lock);
    radeon_object_unreserve(robj);
    return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
    if (rdev->flags & RADEON_IS_IGP) {
        /* Useless to evict on IGP chips */
        return 0;
    }
    return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
    struct radeon_object *robj, *n;
    struct drm_gem_object *gobj;

    if (list_empty(&rdev->gem.objects)) {
        return;
    }
    DRM_ERROR("Userspace still has active objects !\n");
    list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
        mutex_lock(&rdev->ddev->struct_mutex);
        gobj = robj->gobj;
        DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
              gobj, robj, (unsigned long)gobj->size,
              *((unsigned long *)&gobj->refcount));
        list_del_init(&robj->list);
        radeon_object_unref(&robj);
        gobj->driver_private = NULL;
        drm_gem_object_unreference(gobj);
        mutex_unlock(&rdev->ddev->struct_mutex);
    }
}

void radeon_object_fini(struct radeon_device *rdev)
{
    radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
                   struct list_head *head)
{
    if (lobj->wdomain) {
        list_add(&lobj->list, head);
    } else {
        list_add_tail(&lobj->list, head);
    }
}

int radeon_object_list_reserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;
    int r;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            r = radeon_object_reserve(lobj->robj, true);
            if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object.\n");
                return r;
            }
        } else {
        }
    }
    return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            radeon_object_unreserve(lobj->robj);
        } else {
        }
    }
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
    struct radeon_object_list *lobj;
    struct radeon_object *robj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;
    uint32_t flags;
    int r;

    r = radeon_object_list_reserve(head);
    if (unlikely(r != 0)) {
        radeon_object_list_unreserve(head);
        return r;
    }
    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        robj = lobj->robj;
        if (lobj->wdomain) {
            flags = radeon_object_flags_from_domain(lobj->wdomain);
            flags |= TTM_PL_FLAG_TT;
        } else {
            flags = radeon_object_flags_from_domain(lobj->rdomain);
            flags |= TTM_PL_FLAG_TT;
            flags |= TTM_PL_FLAG_VRAM;
        }
        if (!robj->pin_count) {
            robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
            r = ttm_buffer_object_validate(&robj->tobj,
                               robj->tobj.proposed_placement,
                               true, false);
            if (unlikely(r)) {
                radeon_object_list_unreserve(head);
                DRM_ERROR("radeon: failed to validate.\n");
                return r;
            }
            radeon_object_gpu_addr(robj);
        }
        lobj->gpu_offset = robj->gpu_addr;
        if (fence) {
            old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
            robj->tobj.sync_obj = radeon_fence_ref(fence);
            robj->tobj.sync_obj_arg = NULL;
        }
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
        lobj->robj->tobj.sync_obj = NULL;
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
    radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                 struct vm_area_struct *vma)
{
    return ttm_fbdev_mmap(vma, &robj->tobj);
}

#endif
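
/* Return the object size in bytes, taken from the TTM buffer object's
 * page count. */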
unsigned long radeon_object_size(struct radeon_object *robj)
{
    return robj->tobj.num_pages << PAGE_SHIFT;
}
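
/*
 * Typical call sequence for the helpers above (illustrative sketch only,
 * not used in this file; error handling omitted):
 *
 *    struct radeon_object *robj;
 *    uint64_t gpu_addr;
 *    void *ptr;
 *
 *    radeon_object_create(rdev, NULL, 4096, true,
 *                         RADEON_GEM_DOMAIN_VRAM, false, &robj);
 *    radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *    radeon_object_kmap(robj, &ptr);
 */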