/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
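
/* A worked example of how the buckets yield a stable, descending order:
 * list_splice() inserts each bucket at the head of out_list, and the
 * buckets are visited from priority 0 up to 32, so bucket[32] ends up
 * first and bucket[0] last. Items within one bucket keep their insertion
 * order because radeon_cs_buckets_add() appends with list_add_tail().
 * Sketch (hypothetical priorities, not from the driver):
 *
 *   add(A, prio 0); add(B, prio 31); add(C, prio 0);
 *   get_list() -> B, A, C   (descending priority, A still before C)
 */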

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
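		/* For instance (a sketch, assuming RADEON_RELOC_PRIO_MASK
		 * covers the low four flag bits as in radeon_drm.h): a write
		 * buffer submitted with userspace priority 15 maps to
		 * 15 * 2 + 1 = 31, a read-only buffer with the same priority
		 * to 30, while RADEON_CS_MAX_PRIORITY (32) stays reserved for
		 * relocations the kernel itself promotes, like the UVD
		 * message below.
		 */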

		/* the first reloc of a UVD job is the msg and that must be in
		   VRAM, also put everything into VRAM on AGP cards and older
		   IGP chips to avoid image corruption */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}
/*
		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}
*/
		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	return r;
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}
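
/* Summary of the mapping above, derived from the switch for quick
 * reference: GFX always uses the GFX ring; COMPUTE gets its own CP1/CP2
 * rings (chosen by priority) only on SI (Tahiti) and newer, older chips
 * fall back to GFX; DMA picks between the two async DMA rings on Cayman+,
 * has a single DMA ring from RV770 on, and is unavailable before that;
 * UVD and VCE each map to a fixed ring index.
 */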

static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.shared);
		if (r)
			return r;
	}
	return 0;
}
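
/* Layout of the ioctl payload parsed below (a sketch based on the UAPI
 * structs in radeon_drm.h): struct drm_radeon_cs carries num_chunks and a
 * user pointer "chunks" to an array of num_chunks user pointers, each
 * naming a struct drm_radeon_cs_chunk { chunk_id, length_dw, chunk_data }.
 * chunk_id selects RELOCS, IB, CONST_IB or FLAGS handling, and chunk_data
 * points at length_dw dwords of payload.
 */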

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffers, otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_unreference_unlocked(&bo->gem_base);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}
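
/* A note on the helper below: -EDEADLK is the error the buffer wait paths
 * return when they detect a GPU lockup. On lockup the driver attempts a
 * GPU reset and, if the reset succeeds, reports -EAGAIN instead, so that
 * the caller can resubmit the command stream against the reset GPU.
 */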
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
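
/* The overall CS ioctl flow, as implemented above: parser_init copies the
 * chunk metadata in, ib_fill allocates the IB(s) and copies the command
 * dwords, parser_relocs looks up and validates every referenced BO, then
 * exactly one of radeon_cs_ib_chunk (no-VM path, with full command
 * checking via cs_parse) or radeon_cs_ib_vm_chunk (VM path) schedules the
 * IB; parser_fini fences or backs off the validated BOs and frees it all.
 */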

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the first dword of the packet
 *
 * Assumes that p->chunk_ib is properly set. Returns -EINVAL if the packet
 * is bigger than the remaining ib size, or if the packet type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}
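
/* For reference, the CP packet header decoded above (a sketch of the bit
 * layout per the RADEON_CP_PACKET_* macros; treat the exact field widths
 * as an assumption): bits [31:30] hold the packet type and bits [29:16]
 * the payload dword count minus one. For type-0 packets the low bits hold
 * the register offset (plus a one-reg-write flag on pre-R600 parts);
 * type-3 packets keep their opcode in bits [15:8]; type-2 is a one-dword
 * filler, which is why pkt->count is forced to -1 above.
 */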

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	reloc information
 * @nomm:	no memory management for debugging
 *
 * Check if the next packet is a relocation packet3 and resolve it to the
 * matching entry in the parser's relocation list.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}
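
/* Layout note (a sketch from the radeon UAPI; the 4-dword stride matches
 * the FIXME above): each entry in the relocation chunk is a struct
 * drm_radeon_cs_reloc { handle, read_domains, write_domain, flags }, so
 * the NOP payload "idx" is divided by 4 to index p->relocs. In the legacy
 * nomm path the kernel instead reads dwords idx+0 and idx+3 of the chunk
 * directly as the low and high halves of the GPU offset.
 */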