Subversion Repositories Kolibri OS

Rev 1986 → Rev 2997
Line 23... Line 23...
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
-#include "drmP.h"
-#include "drm.h"
-#include "radeon_drm.h"
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
 #include "radeon.h"
Line 32... Line 31...
 
 int radeon_gem_object_init(struct drm_gem_object *obj)
 {
-	/* we do nothings here */
+	BUG();
+
 	return 0;
 }
 
Line 49... Line 49...
-                 int alignment, int initial_domain,
-                 bool discardable, bool kernel,
-				struct drm_gem_object **obj)
+                int alignment, int initial_domain,
+                bool discardable, bool kernel,
+                struct drm_gem_object **obj)
 {
     struct radeon_bo *robj;
+	unsigned long max_size;
 	int r;
 
 	*obj = NULL;
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
-	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
+
+	/* maximun bo size is the minimun btw visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if (size > max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+		       __func__, __LINE__, size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
 	if (r) {
-		if (r != -ERESTARTSYS)
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
+		}
         return r;
 	}
 	*obj = &robj->gem_base;
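
The retry path added above is the interesting part of this hunk: a VRAM allocation that fails for any reason other than an interrupting signal is retried with the domain widened to VRAM|GTT before the error is reported. Below is a minimal standalone sketch of that fallback pattern, not the driver code itself; DOMAIN_VRAM, DOMAIN_GTT and try_alloc() are hypothetical stand-ins for RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_DOMAIN_GTT and radeon_bo_create().

/* Standalone sketch (not driver code): try the requested placement first,
 * and if a VRAM-only request fails, widen the domain to VRAM|GTT and retry. */
#include <stdio.h>

#define DOMAIN_VRAM 0x4
#define DOMAIN_GTT  0x2

/* Pretend VRAM is exhausted, so only GTT-capable requests succeed. */
static int try_alloc(unsigned domain)
{
	return (domain & DOMAIN_GTT) ? 0 : -12;	/* -12 plays the role of -ENOMEM */
}

static int alloc_with_fallback(unsigned domain)
{
	int r;
retry:
	r = try_alloc(domain);
	if (r) {
		if (domain == DOMAIN_VRAM) {
			domain |= DOMAIN_GTT;	/* widen the domain and retry */
			goto retry;
		}
		fprintf(stderr, "allocation failed (%d)\n", r);
	}
	return r;
}

int main(void)
{
	/* First attempt (VRAM only) fails, the retry with VRAM|GTT succeeds. */
	printf("result: %d\n", alloc_with_fallback(DOMAIN_VRAM));
	return 0;
}
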
Line 114... Line 130...
 	if (!domain) {
 		domain = rdomain;
 	}
 	if (!domain) {
 		/* Do nothings */
-		printk(KERN_WARNING "Set domain withou domain !\n");
+		printk(KERN_WARNING "Set domain without domain !\n");
 		return 0;
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
 //		r = radeon_bo_wait(robj, NULL, false);
Line 149... Line 165...
 			  struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
 	args->vram_size = rdev->mc.real_vram_size;
 	args->vram_visible = (u64)man->size << PAGE_SHIFT;
 	if (rdev->stollen_vga_memory)
 		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
 	args->vram_visible -= radeon_fbdev_total_size(rdev);
-	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
-		RADEON_IB_POOL_SIZE*64*1024;
+	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+	for(i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->ring[i].ring_size;
 	return 0;
 }
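
The new accounting above replaces the single rdev->cp.ring_size term with a loop over all rings. A small standalone calculation is sketched below to show the arithmetic only; NUM_RINGS, IB_POOL_SIZE and the byte counts are made-up example values, not the driver's constants.

/* Standalone sketch (not driver code): the gart_size reported by the info
 * ioctl after Rev 2997 is the GTT size minus 4096 bytes, minus the IB pool,
 * minus the ring buffer of every ring. */
#include <stdio.h>

#define NUM_RINGS    3		/* stand-in for RADEON_NUM_RINGS    */
#define IB_POOL_SIZE 16		/* stand-in for RADEON_IB_POOL_SIZE */

int main(void)
{
	unsigned long gtt_size = 512UL * 1024 * 1024;	/* example: 512 MiB GTT */
	unsigned long ring_size[NUM_RINGS] = {		/* example per-ring sizes */
		1024 * 1024, 64 * 1024, 64 * 1024
	};
	unsigned long gart_size;
	unsigned i;

	gart_size = gtt_size - 4096 - IB_POOL_SIZE * 64 * 1024;
	for (i = 0; i < NUM_RINGS; ++i)
		gart_size -= ring_size[i];

	printf("usable gart_size: %lu bytes\n", gart_size);
	return 0;
}
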
Line 187... Line 205...
 	struct drm_radeon_gem_create *args = data;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
 
+	down_read(&rdev->exclusive_lock);
 	/* create a gem object to contain this object in */
 	args->size = roundup(args->size, PAGE_SIZE);
 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
 				     args->initial_domain, false,
 					false, &gobj);
 	if (r) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
 		return r;
 	}
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
 		return r;
 	}
 	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
 	return 0;
 }
 
 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
 {
 	/* transition the BO to a domain -
 	 * just validate the BO into a certain domain */
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_set_domain *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
 	int r;
 
 	/* for now if someone requests domain CPU -
 	 * just make sure the buffer is finished with */
+	down_read(&rdev->exclusive_lock);
 
 	/* just do a BO wait for now */
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
+		up_read(&rdev->exclusive_lock);
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
 
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+	drm_gem_object_unreference_unlocked(gobj);
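
Both ioctls above gain the same locking shape in Rev 2997: take rdev->exclusive_lock for reading at the top and release it on every exit path, routing errors through radeon_gem_handle_lockup(), so that an exclusive holder of the lock (such as the GPU reset path) cannot run concurrently with the ioctl. The sketch below models that shape in user space, with a pthread rwlock standing in for the kernel rw_semaphore; gem_ioctl() and do_work() are hypothetical.

/* Standalone sketch (not driver code): hold a reader lock across the whole
 * operation and release it on both the error and the success path. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t exclusive_lock = PTHREAD_RWLOCK_INITIALIZER;

static int do_work(int fail)
{
	return fail ? -1 : 0;	/* hypothetical ioctl body */
}

static int gem_ioctl(int fail)
{
	int r;

	pthread_rwlock_rdlock(&exclusive_lock);		/* plays the role of down_read() */
	r = do_work(fail);
	if (r) {
		pthread_rwlock_unlock(&exclusive_lock);	/* error path release */
		return r;
	}
	pthread_rwlock_unlock(&exclusive_lock);		/* success path release */
	return 0;
}

int main(void)
{
	printf("ok: %d, error: %d\n", gem_ioctl(0), gem_ioctl(1));
	return 0;
}
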
Line 259... Line 288...
 }
 
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
 	int r;
Line 284... Line 314...
 		args->domain = RADEON_GEM_DOMAIN_CPU;
 	default:
 		break;
 	}
 	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
Line 302... Line 334...
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, NULL, false);
 	/* callback hw specific functions if any */
-	if (robj->rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+	if (rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
 	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,