/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    /* Kolibri shim: caller and kernel share one address space, so a
     * plain memcpy suffices.  Keep the Linux contract: return the
     * number of bytes NOT copied, i.e. 0 on success; returning n here
     * would make every "if (copy_from_user(...))" check below fail. */
    memcpy(to, from, n);
    return 0;
}
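
/*
 * Call-site sketch for the shim above (illustrative, not driver code):
 * every user in this file follows the Linux idiom of treating a non-zero
 * return value as a fault, e.g.
 *
 *	if (copy_from_user(dst, src, len))
 *		return -EFAULT;
 *
 * which is why the shim must report the bytes *not* copied, 0 on success.
 */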

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
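
/*
 * Worked example (illustrative only): list_splice() inserts each bucket
 * at the head of out_list, so splicing the buckets in ascending index
 * order leaves the highest-priority bucket in front.  Adding A (priority
 * 0), B (priority 2), then C (priority 0) gives:
 *
 *	bucket[0] = [A, C], bucket[2] = [B]
 *	after splicing: out_list = [B, A, C]
 *
 * i.e. descending priority, and stable for the equal-priority A and C.
 */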

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		unsigned priority;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (duplicate) {
			p->relocs[i].handle = 0;
			continue;
		}

		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
							  r->handle);
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
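
		/* Worked example (illustrative): a read-only buffer with
		 * userspace priority 7 gets 7 * 2 + 0 = 14, the same buffer
		 * with a write domain gets 7 * 2 + 1 = 15, and the largest
		 * userspace value is 15 * 2 + 1 = 31.  Only the kernel
		 * itself assigns 32 (RADEON_CS_MAX_PRIORITY, see the UVD
		 * case below).
		 */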

		/* The first reloc of an UVD job is the msg and that must be
		   in VRAM; also put everything else into VRAM on AGP cards
		   to avoid image corruption */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);

	return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_semaphore_sync_to(p->ib.semaphore,
					 p->relocs[i].robj->tbo.sync_obj);
	}
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}
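
	/* Layout of the FLAGS chunk consumed above (summary of the code,
	 * for reference):
	 *   kdata[0] = cs_flags (e.g. RADEON_CS_USE_VM)
	 *   kdata[1] = ring id  (optional, defaults to RADEON_CS_RING_GFX)
	 *   kdata[2] = priority (optional, defaults to 0)
	 */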

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
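
/*
 * Comparator sketch (illustrative): list_sort() places a before b when
 * the comparator returns a negative value.  For a 1-page BO (a) and a
 * 4-page BO (b), 1 - 4 = -3 < 0, so the smaller buffer sorts first;
 * this is the LRU-friendly order described in radeon_cs_parser_fini()
 * below.
 */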

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to back off the reservation
 *
 * If error is set then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be evicted first
		 * later on, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->ib.fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	kfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])
			continue;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
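
/*
 * Note (editorial): -EDEADLK is how the fence/reservation code reports
 * a GPU lockup on this path.  After a successful radeon_gpu_reset() the
 * error is converted to -EAGAIN so the submitter knows the command
 * stream itself was fine and can simply be resubmitted.
 */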

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

//	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
//		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
//		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
//		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
//	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
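
/*
 * Editorial overview of the ioctl above: radeon_cs_parser_init() copies
 * the chunk table in from userspace, radeon_cs_ib_fill() sets up the
 * IB(s), radeon_cs_parser_relocs() looks up and validates every
 * referenced BO, radeon_cs_ib_chunk() handles the non-VM path and
 * radeon_cs_ib_vm_chunk() the VM path (each returns early when not
 * applicable), and radeon_cs_parser_fini() fences or backs off the
 * buffers and frees the parser state.
 */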

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib chunk
 *
 * Assumes that chunk_ib_idx is properly set.  Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet type is
 * unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
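
/*
 * Decoding sketch (assuming the usual radeon CP header layout: type in
 * bits 31:30, count in bits 29:16, packet3 opcode in bits 15:8).  For
 * the header 0xC0001000:
 *
 *	type   = (0xC0001000 >> 30) & 0x3    = 3    (PACKET3)
 *	count  = (0xC0001000 >> 16) & 0x3FFF = 0    (one payload dword)
 *	opcode = (0xC0001000 >> 8)  & 0xFF   = 0x10 (NOP)
 *
 * which is exactly the two-dword relocation NOP consumed by
 * radeon_cs_packet_next_reloc() below.
 */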

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
778
 
779
/**
780
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
781
 * @parser:		parser structure holding parsing context.
782
 * @data:		pointer to relocation data
783
 * @offset_start:	starting offset
784
 * @offset_mask:	offset mask (to align start offset on)
785
 * @reloc:		reloc informations
786
 *
787
 * Check if next packet is relocation packet3, do bo validation and compute
788
 * GPU offset using the provided start.
789
 **/
790
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
791
				struct radeon_cs_reloc **cs_reloc,
792
				int nomm)
793
{
794
	struct radeon_cs_chunk *relocs_chunk;
795
	struct radeon_cs_packet p3reloc;
796
	unsigned idx;
797
	int r;
798
 
799
	if (p->chunk_relocs_idx == -1) {
800
		DRM_ERROR("No relocation chunk !\n");
801
		return -EINVAL;
802
	}
803
	*cs_reloc = NULL;
804
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
805
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
806
	if (r)
807
		return r;
808
	p->idx += p3reloc.count + 2;
809
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
810
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
811
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
812
			  p3reloc.idx);
813
		radeon_cs_dump_packet(p, &p3reloc);
814
		return -EINVAL;
815
	}
816
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
817
	if (idx >= relocs_chunk->length_dw) {
818
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
819
			  idx, relocs_chunk->length_dw);
820
		radeon_cs_dump_packet(p, &p3reloc);
821
		return -EINVAL;
822
	}
823
	/* FIXME: we assume reloc size is 4 dwords */
824
	if (nomm) {
825
		*cs_reloc = p->relocs;
826
		(*cs_reloc)->gpu_offset =
827
			(u64)relocs_chunk->kdata[idx + 3] << 32;
828
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
829
	} else
830
		*cs_reloc = p->relocs_ptr[(idx / 4)];
831
	return 0;
832
}
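
/*
 * Relocation packet sketch (illustrative): userspace emits a two-dword
 * PACKET3 NOP, e.g.
 *
 *	0xC0001000	header (type 3, opcode NOP, count 0)
 *	0x00000008	dword offset of the reloc entry in the relocs chunk
 *
 * Each entry in the relocs chunk is assumed to span 4 dwords (see the
 * FIXME above), so offset 8 resolves to p->relocs_ptr[8 / 4], i.e. the
 * third relocation entry.
 */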