/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include 
#include "radeon_object.h"
 
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
             int pages, u32_t *pagelist);
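
/* Simple range allocators used in this port instead of the full TTM
 * machinery: one drm_mm manager for the visible VRAM aperture and one for
 * the GTT. Both work in units of pages (PAGE_SHIFT). */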
 
static struct drm_mm   mm_gtt;
static struct drm_mm   mm_vram;
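
/* radeon_object_init() sets up both heaps. The VRAM heap starts at
 * 0x800000 (8 MiB), presumably leaving the low part of the aperture to the
 * primary framebuffer, and covers the rest of the visible aperture; the
 * GTT heap covers the whole GTT size. */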
 
int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    ENTER();

    r = drm_mm_init(&mm_vram, 0x800000 >> PAGE_SHIFT,
               ((rdev->mc.aper_size - 0x800000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
 //   return radeon_ttm_init(rdev);
}
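
/* Translate a GEM domain mask (RADEON_GEM_DOMAIN_VRAM/GTT/CPU) into TTM
 * placement flags. An object with no domain requested falls back to
 * system memory (TTM_PL_FLAG_SYSTEM). */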
 
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
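
/* Allocate a radeon_object and reserve address space for it in either the
 * VRAM or the GTT drm_mm heap, depending on the requested domain. In this
 * port the TTM buffer object is never initialized (the call is commented
 * out below), and the gobj/interruptible arguments are currently unused. */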
 
int radeon_object_create(struct radeon_device *rdev,
             struct drm_gem_object *gobj,
             unsigned long size,
             bool kernel,
             uint32_t domain,
             bool interruptible,
             struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r = 0;

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
//    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);

    robj->flags = flags;
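
    /* Carve the backing range out of the chosen drm_mm heap. The pattern
     * below is the usual drm_mm one: pre-allocate a node (drm_mm_pre_get),
     * search for a large enough hole, then claim it atomically and retry
     * from the top if the block could not be claimed (e.g. another caller
     * raced us). Note that the error paths below return without freeing
     * robj. */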
 
    if (flags & TTM_PL_FLAG_VRAM)
    {
        size_t num_pages;

        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get:
        r = drm_mm_pre_get(&mm_vram);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        robj->mm_node =  drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

//        dbgprintf("alloc vram: base %x size %x\n",
//                   robj->vm_addr << PAGE_SHIFT, num_pages  << PAGE_SHIFT);

    }
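
    /* GTT-backed objects follow the same allocation pattern against the
     * GTT heap; the actual system pages are only allocated and bound to
     * the GART later, in radeon_object_pin(). */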
 
    if (flags & TTM_PL_FLAG_TT)
    {
        size_t num_pages;

        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get1:
        r = drm_mm_pre_get(&mm_gtt);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        robj->mm_node =  drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get1;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

//        dbgprintf("alloc gtt: base %x size %x\n",
//                   robj->vm_addr << PAGE_SHIFT, num_pages  << PAGE_SHIFT);
    }

//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* TTM calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
              size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
    return 0;
}
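
/* page_tabs appears to be the KolibriOS self-mapped page-table window:
 * indexing it with (virtual address >> 12) yields the page-table entries,
 * and thus the physical pages, backing a kernel allocation. It is used
 * below to build the page list handed to radeon_gart_bind(). */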
 
#define page_tabs  0xFDC00000
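
/* Pin an object and return its GPU address. VRAM objects simply get
 * mc.vram_location added to their heap offset; GTT objects additionally
 * have system memory allocated here (KernelAlloc) and bound into the GART
 * before mc.gtt_location is added. Pinning an already pinned object only
 * bumps pin_count. */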
 
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
              uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

//    flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);
//    r = radeon_object_reserve(robj, false);
//    if (unlikely(r != 0)) {
//        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//        return r;
//    }
//    tmp = robj->tobj.mem.placement;
//    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//    r = ttm_buffer_object_validate(&robj->tobj,
//                       robj->tobj.proposed_placement,
//                       false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if (robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT)
    {
        u32_t *pagelist;
        robj->kptr  = KernelAlloc( robj->mm_node->size << PAGE_SHIFT );
        dbgprintf("kernel alloc %x\n", robj->kptr );

        pagelist =  &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size,  pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    }
    else
    {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    }

//    flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    return r;
}
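
/* Map an object for CPU access. VRAM objects are mapped through the PCI
 * aperture with MapIoMem(); a GTT object that was pinned already has a
 * kernel mapping from radeon_object_pin(), so its cached kptr is returned.
 * Anything else fails. */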
 
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r = 0;

//   spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                           robj->mm_node->size << 12, PG_SW);
//        dbgprintf("map io mem %x at %x\n", robj->cpu_addr, robj->kptr);

    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    return 0;
}
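
/* Drop the CPU mapping. Only VRAM aperture mappings are released here
 * (FreeKernelSpace); the kernel pages behind a GTT object are left alone,
 * presumably because the GART binding set up in radeon_object_pin() still
 * references them. */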
 
void radeon_object_kunmap(struct radeon_object *robj)
{
//   spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        FreeKernelSpace(robj->kptr);
        robj->kptr = NULL;
    }
//   spin_unlock(&robj->tobj.lock);
}
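
/* The TTM-based implementation below (matching the upstream Linux radeon
 * driver) is disabled with "#if 0" and apparently kept for reference only.
 * It contains its own copies of radeon_object_create(), radeon_object_pin(),
 * radeon_object_kmap() and radeon_object_kunmap(), and radeon_object_unpin()
 * appears twice. */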
 
#if 0

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//   spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }
//   spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                       robj->tobj.proposed_placement,
                       false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}


/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
	struct radeon_object *robj;

	robj = container_of(tobj, struct radeon_object, tobj);
//   list_del_init(&robj->list);
	kfree(robj);
}
 
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}


int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr)
{
	struct radeon_object *robj;
	enum ttm_bo_type type;
	uint32_t flags;
	int r;

//   if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//       rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//   }
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*robj_ptr = NULL;
	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (robj == NULL) {
		return -ENOMEM;
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
//   INIT_LIST_HEAD(&robj->list);

	flags = radeon_object_flags_from_domain(domain);
//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
	if (unlikely(r != 0)) {
		/* TTM calls radeon_ttm_object_object_destroy if an error happens */
		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
	return 0;
}
 
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

//   spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
//       spin_unlock(&robj->tobj.lock);
		return 0;
	}
//   spin_unlock(&robj->tobj.lock);
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
//   spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//   spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//   spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
//       spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
//   spin_unlock(&robj->tobj.lock);
	ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
	struct ttm_buffer_object *tobj;

	if ((*robj) == NULL) {
		return;
	}
	tobj = &((*robj)->tobj);
	ttm_bo_unref(&tobj);
	if (tobj == NULL) {
		*robj = NULL;
	}
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}
 
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	int r;

	flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
//       spin_unlock(&robj->tobj.lock);
		return 0;
	}
//   spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
		return r;
	}
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	radeon_object_gpu_addr(robj);
	if (gpu_addr != NULL) {
		*gpu_addr = robj->gpu_addr;
	}
	robj->pin_count = 1;
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
	}
	radeon_object_unreserve(robj);
	return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	int r;

//   spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
		return;
	}
	robj->pin_count--;
	if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
		return;
	}
//   spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
		return;
	}
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
}
 
int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
//   spin_lock(&robj->tobj.lock);
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, false, false);
	}
//   spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_IGP) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
	struct radeon_object *robj, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects !\n");
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = robj->gobj;
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
			  gobj, robj, (unsigned long)gobj->size,
			  *((unsigned long *)&gobj->refcount));
		list_del_init(&robj->list);
		radeon_object_unref(&robj);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

void radeon_object_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}
 
void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_object_list_reserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;
	int r;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			r = radeon_object_reserve(lobj->robj, true);
			if (unlikely(r != 0)) {
				DRM_ERROR("radeon: failed to reserve object.\n");
				return r;
			}
		} else {
		}
	}
	return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			radeon_object_unreserve(lobj->robj);
		} else {
		}
	}
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
	struct radeon_object_list *lobj;
	struct radeon_object *robj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;
	uint32_t flags;
	int r;

	r = radeon_object_list_reserve(head);
	if (unlikely(r != 0)) {
		radeon_object_list_unreserve(head);
		return r;
	}
	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		robj = lobj->robj;
		if (lobj->wdomain) {
			flags = radeon_object_flags_from_domain(lobj->wdomain);
			flags |= TTM_PL_FLAG_TT;
		} else {
			flags = radeon_object_flags_from_domain(lobj->rdomain);
			flags |= TTM_PL_FLAG_TT;
			flags |= TTM_PL_FLAG_VRAM;
		}
		if (!robj->pin_count) {
			robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
			r = ttm_buffer_object_validate(&robj->tobj,
						       robj->tobj.proposed_placement,
						       true, false);
			if (unlikely(r)) {
				radeon_object_list_unreserve(head);
				DRM_ERROR("radeon: failed to validate.\n");
				return r;
			}
			radeon_object_gpu_addr(robj);
		}
		lobj->gpu_offset = robj->gpu_addr;
		if (fence) {
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
			robj->tobj.sync_obj = radeon_fence_ref(fence);
			robj->tobj.sync_obj_arg = NULL;
		}
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
		lobj->robj->tobj.sync_obj = NULL;
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
	radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &robj->tobj);
}

#endif
 
unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}