Subversion Repositories Kolibri OS


Diff of radeon_gem.c: Rev 2997 (old, -) vs. Rev 5078 (new, +)
@@ -27,26 +27,19 @@
  */
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
-
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
 
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
 	if (robj) {
 		radeon_bo_unref(&robj);
 	}
 }
 
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                 int alignment, int initial_domain,
-                bool discardable, bool kernel,
+				u32 flags, bool kernel,
                 struct drm_gem_object **obj)
 {
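Note: with this revision radeon_gem_object_create() takes an unsigned long size and a u32 flags word (forwarded to radeon_bo_create()) in place of the old bool discardable. A minimal caller sketch under the new signature; the size, alignment, domain, and flags values below are illustrative only, not taken from the diff:

    struct drm_gem_object *gobj;
    /* 1 MiB page-aligned buffer in the GTT domain; flags = 0 requests
     * no special placement (illustrative values) */
    int r = radeon_gem_object_create(rdev, 1024 * 1024, PAGE_SIZE,
                                     RADEON_GEM_DOMAIN_GTT, 0 /* flags */,
                                     false /* kernel */, &gobj);
    if (r)
        return r;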
@@ -58,25 +51,28 @@
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
 
-	/* maximun bo size is the minimun btw visible vram and gtt size */
-	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	/* Maximum bo size is the unpinned gtt size since we use the gtt to
+	 * handle vram to system pool migrations.
+	 */
+	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
 	if (size > max_size) {
-		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
-		       __func__, __LINE__, size >> 20, max_size >> 20);
+		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+			  size >> 20, max_size >> 20);
 		return -ENOMEM;
 	}
 
 retry:
-	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+			     flags, NULL, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
 				initial_domain |= RADEON_GEM_DOMAIN_GTT;
 				goto retry;
 			}
-			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
 		}
         return r;
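Note: the failure path above implements a domain-widening retry: a VRAM-only request that fails is retried once with GTT added to the placement mask. The same pattern in isolation (a sketch; try_alloc() is a hypothetical stand-in for radeon_bo_create(), and the domain constants are the ones used in the diff):

    /* Hypothetical allocator standing in for radeon_bo_create():
     * pretend VRAM is exhausted so the fallback path is exercised. */
    static int try_alloc(u32 domain)
    {
    	return (domain == RADEON_GEM_DOMAIN_VRAM) ? -ENOMEM : 0;
    }

    static int alloc_with_fallback(u32 domain)
    {
    	int r;
    retry:
    	r = try_alloc(domain);
    	if (r && r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
    		domain |= RADEON_GEM_DOMAIN_GTT;	/* widen placement, retry once */
    		goto retry;
    	}
    	return r;
    }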
@@ -88,35 +84,9 @@
 	mutex_unlock(&rdev->gem.mutex);
 
 	return 0;
-}
-
-int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
-			  uint64_t *gpu_addr)
-{
-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
-	int r;
-
-	r = radeon_bo_reserve(robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
-	radeon_bo_unreserve(robj);
-	return r;
-}
-
-void radeon_gem_object_unpin(struct drm_gem_object *obj)
-{
-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
-	int r;
-
-	r = radeon_bo_reserve(robj, false);
-	if (likely(r == 0)) {
-		radeon_bo_unpin(robj);
-		radeon_bo_unreserve(robj);
-	}
 }
 
-int radeon_gem_set_domain(struct drm_gem_object *gobj,
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			  uint32_t rdomain, uint32_t wdomain)
 {
 	struct radeon_bo *robj;
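Note: the GEM-level pin/unpin wrappers are dropped in the new revision, and radeon_gem_set_domain() becomes static, presumably because it no longer has users outside this file. The sequence the removed wrapper performed is the plain radeon_bo API already used elsewhere in this file; for reference, a sketch equivalent to the deleted radeon_gem_object_pin() body:

    /* Pin a buffer object and return its GPU address (sketch of the
     * reserve/pin/unreserve sequence the removed wrapper performed). */
    static int pin_bo(struct radeon_bo *robj, u32 domain, u64 *gpu_addr)
    {
    	int r = radeon_bo_reserve(robj, false);
    	if (unlikely(r != 0))
    		return r;
    	r = radeon_bo_pin(robj, domain, gpu_addr);
    	radeon_bo_unreserve(robj);
    	return r;
    }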
@@ -165,19 +135,16 @@
 			  struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
-	unsigned i;
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
 	args->vram_size = rdev->mc.real_vram_size;
 	args->vram_visible = (u64)man->size << PAGE_SHIFT;
-	if (rdev->stollen_vga_memory)
-		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
-	args->vram_visible -= radeon_fbdev_total_size(rdev);
-	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
-	for(i = 0; i < RADEON_NUM_RINGS; ++i)
-		args->gart_size -= rdev->ring[i].ring_size;
+	args->vram_visible -= rdev->vram_pin_size;
+	args->gart_size = rdev->mc.gtt_size;
+	args->gart_size -= rdev->gart_pin_size;
+
 	return 0;
 }
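Note: the info ioctl now derives both numbers from pin accounting instead of enumerating rings and subtracting fbdev/stolen memory. Worked example with illustrative values: with rdev->mc.gtt_size = 1024 MiB and rdev->gart_pin_size = 64 MiB, the ioctl reports gart_size = 1024 MiB - 64 MiB = 960 MiB; vram_visible is reduced by rdev->vram_pin_size in the same way.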
@@ -209,10 +176,10 @@
 
 	down_read(&rdev->exclusive_lock);
 	/* create a gem object to contain this object in */
 	args->size = roundup(args->size, PAGE_SIZE);
 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
-				     args->initial_domain, false,
+				     args->initial_domain, args->flags,
 					false, &gobj);
 	if (r) {
 		up_read(&rdev->exclusive_lock);
 		r = radeon_gem_handle_lockup(rdev, r);
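Note: the create ioctl now forwards a userspace-supplied flags word into the allocator. A hedged userspace sketch using libdrm; it assumes the drm_radeon_gem_create layout with its flags member as in the radeon UAPI of this era, an already-open DRM fd, and RADEON_GEM_GTT_WC as one example flag value:

    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Ask the kernel for a 1 MiB write-combined GTT buffer (sketch). */
    struct drm_radeon_gem_create req = {
    	.size           = 1024 * 1024,
    	.alignment      = 4096,
    	.initial_domain = RADEON_GEM_DOMAIN_GTT,
    	.flags          = RADEON_GEM_GTT_WC,
    };
    int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
    			    &req, sizeof(req));
    /* on success, req.handle names the new GEM object */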
@@ -301,21 +268,10 @@
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, &cur_placement, true);
-	switch (cur_placement) {
-	case TTM_PL_VRAM:
-		args->domain = RADEON_GEM_DOMAIN_VRAM;
-		break;
-	case TTM_PL_TT:
-		args->domain = RADEON_GEM_DOMAIN_GTT;
-		break;
-	case TTM_PL_SYSTEM:
-		args->domain = RADEON_GEM_DOMAIN_CPU;
-	default:
-		break;
-	}
+	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_unreference_unlocked(gobj);
 	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
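Note: the open-coded switch is replaced by the radeon_mem_type_to_domain() helper, whose definition is not part of this diff. Judging from the switch it replaces, it presumably maps TTM placements to GEM domain flags roughly like this (a sketch, not the verbatim helper):

    static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
    {
    	switch (mem_type) {
    	case TTM_PL_VRAM:
    		return RADEON_GEM_DOMAIN_VRAM;
    	case TTM_PL_TT:
    		return RADEON_GEM_DOMAIN_GTT;
    	case TTM_PL_SYSTEM:
    		return RADEON_GEM_DOMAIN_CPU;
    	default:
    		return 0;
    	}
    }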
@@ -326,18 +282,20 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
 	int r;
+	uint32_t cur_placement = 0;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
-	r = radeon_bo_wait(robj, NULL, false);
-	/* callback hw specific functions if any */
-	if (rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+	r = radeon_bo_wait(robj, &cur_placement, false);
+	/* Flush HDP cache via MMIO if necessary */
+	if (rdev->asic->mmio_hdp_flush &&
+	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+		robj->rdev->asic->mmio_hdp_flush(rdev);
 	drm_gem_object_unreference_unlocked(gobj);
 	r = radeon_gem_handle_lockup(rdev, r);
 	return r;