/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"
 
/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_vce.bin"

MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_vce_idle_work_handler(struct work_struct *work);
 
/**
 * radeon_vce_init - allocate memory, load vce firmware
 *
 * @rdev: radeon_device pointer
 *
 * First step to get VCE online: allocate memory and load the firmware.
 */
int radeon_vce_init(struct radeon_device *rdev)
{
	static const char *fw_version = "[ATI LIB=VCEFW,";
	static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
	unsigned long size;
	const char *fw_name, *c;
	uint8_t start, mid, end;
	int i, r;

	INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);

	switch (rdev->family) {
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

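	/*
	 * The version strings are embedded in the firmware image as
	 * "[ATI LIB=VCEFW,xx.xx.xx]" and "[ATI LIB=VCEFWSTATS,xx]", so we
	 * brute-force scan for the signatures below; the 9 resp. 3 bytes
	 * subtracted from the scan range leave room for the version payload
	 * behind a match.
	 */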
	/* search for firmware version */

	size = rdev->vce_fw->size - strlen(fw_version) - 9;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fw_version, strlen(fw_version)) == 0)
			break;

	if (size == 0)
		return -EINVAL;

	c += strlen(fw_version);
	if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
		return -EINVAL;

	/* search for feedback version */

	size = rdev->vce_fw->size - strlen(fb_version) - 3;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fb_version, strlen(fb_version)) == 0)
			break;

	if (size == 0)
		return -EINVAL;

	c += strlen(fb_version);
	if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
		return -EINVAL;

	DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
		 start, mid, end, rdev->vce.fb_version);

	rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
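	/*
	 * fw_version packs major.minor.revision into the top three bytes,
	 * so the supported 40.2.2 firmware reads as
	 * (40 << 24) | (2 << 16) | (2 << 8) = 0x28020200 in the check below.
	 */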
 
	/* we can only work with this fw version for now */
	if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8)))
		return -EINVAL;
 
	/* allocate firmware, stack and heap BO */

	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
	       RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->vce.gpu_addr);
	radeon_bo_unreserve(rdev->vce.vcpu_bo);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		atomic_set(&rdev->vce.handles[i], 0);
		rdev->vce.filp[i] = NULL;
	}

	return 0;
}
 
/**
 * radeon_vce_fini - free memory
 *
 * @rdev: radeon_device pointer
 *
 * Last step of VCE teardown: free the firmware memory.
 */
void radeon_vce_fini(struct radeon_device *rdev)
{
	if (rdev->vce.vcpu_bo == NULL)
		return;

	radeon_bo_unref(&rdev->vce.vcpu_bo);

	release_firmware(rdev->vce_fw);
}
 
/**
 * radeon_vce_suspend - unpin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 */
int radeon_vce_suspend(struct radeon_device *rdev)
{
	int i;

	if (rdev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&rdev->vce.handles[i]))
			break;

	if (i == RADEON_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}
 
/**
 * radeon_vce_resume - pin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 */
int radeon_vce_resume(struct radeon_device *rdev)
{
	void *cpu_addr;
	int r;

	if (rdev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);

	radeon_bo_kunmap(rdev->vce.vcpu_bo);

	radeon_bo_unreserve(rdev->vce.vcpu_bo);

	return 0;
}
 
/**
 * radeon_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it isn't used anymore.
 */
static void radeon_vce_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, vce.idle_work.work);

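	/* nothing left in flight on either VCE ring: gate power via DPM or
	 * drop the VCE clocks; otherwise check again in a second */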
	if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
	    (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, false);
		} else {
			radeon_set_vce_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}
 
/**
 * radeon_vce_note_usage - power up VCE
 *
 * @rdev: radeon_device pointer
 *
 * Make sure VCE is powered up when we want to use it.
 */
void radeon_vce_note_usage(struct radeon_device *rdev)
{
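	/*
	 * cancel_delayed_work_sync() returns false when the idle work was
	 * no longer pending, i.e. the handler may already have powered VCE
	 * down; in that case clocks/power are brought back up below.
	 */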
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, true);
		} else {
			radeon_set_vce_clocks(rdev, 53300, 40000);
		}
	}
}
 
/**
 * radeon_vce_free_handles - free still open VCE handles
 *
 * @rdev: radeon_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->vce.handles[i]);
		if (!handle || rdev->vce.filp[i] != filp)
			continue;

		radeon_vce_note_usage(rdev);

		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
					       handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		rdev->vce.filp[i] = NULL;
		atomic_set(&rdev->vce.handles[i], 0);
	}
}
 
/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

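	/* point the feedback buffer at otherwise unused scratch space
	 * inside the IB itself */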
	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
 
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

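	/* as in the create msg, use scratch space inside the IB as the
	 * dummy feedback buffer */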
	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
 
/**
 * radeon_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size the relocated buffer must have
 *
 * Patch relocation inside command stream with real buffer address
 */
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
			unsigned size)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	uint64_t start, end, offset;
	unsigned idx;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, lo);
	idx = radeon_get_ib_value(p, hi);

	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

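	/* patch the two IB dwords in place with the real 64 bit GPU address */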
	p->ib.ptr[lo] = start & 0xFFFFFFFF;
	p->ib.ptr[hi] = start >> 32;

	if (end <= start) {
		DRM_ERROR("invalid reloc offset %llX!\n", offset);
		return -EINVAL;
	}
	if ((end - start) < size) {
		DRM_ERROR("buffer too small (%d / %d)!\n",
			(unsigned)(end - start), size);
		return -EINVAL;
	}

	return 0;
}
 
/**
 * radeon_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have another free session index.
 */
int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
			return i;
	}

	/* handle not found, try to alloc a new one */
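	/* atomic_cmpxchg() only claims a slot that still reads 0, so two
	 * concurrent submitters can never grab the same index */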
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			p->rdev->vce.img_size[i] = 0;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
 
/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 *
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	int session_idx = -1;
	bool destroyed = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r;

	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
		uint32_t len = radeon_get_ib_value(p, p->idx);
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			return -EINVAL;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			return -EINVAL;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
			session_idx = radeon_vce_validate_handle(p, handle);
			if (session_idx < 0)
				return session_idx;
			size = &p->rdev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			break;

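		/*
		 * The create command presumably carries the image dimensions
		 * here; remember a per-session size estimate that the encode
		 * and context buffer commands below are checked against.
		 */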
		case 0x01000001: // create
			*size = radeon_get_ib_value(p, p->idx + 8) *
				radeon_get_ib_value(p, p->idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
			break;

		case 0x03000001: // encode
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
						*size);
			if (r)
				return r;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
						*size / 3);
			if (r)
				return r;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						*size * 2);
			if (r)
				return r;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = radeon_get_ib_value(p, p->idx + 4);
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						tmp);
			if (r)
				return r;
			break;

		case 0x05000005: // feedback buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						4096);
			if (r)
				return r;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			return -EINVAL;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			return -EINVAL;
		}

		p->idx += len / 4;
	}

	if (destroyed) {
		/* IB contains a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
	}

	return 0;
}
 
/**
 * radeon_vce_semaphore_emit - emit a semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

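	/* the qword-aligned address (addr >> 3) is programmed in two
	 * 20 bit halves */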
	radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		radeon_ring_write(ring, VCE_CMD_END);

	return true;
}
 
/**
 * radeon_vce_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: the IB to execute
 *
 */
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	radeon_ring_write(ring, VCE_CMD_IB);
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}
 
/**
 * radeon_vce_fence_emit - add a fence command to the ring
 *
 * @rdev: radeon_device pointer
 * @fence: the fence
 *
 */
void radeon_vce_fence_emit(struct radeon_device *rdev,
			   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

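	/* write the sequence number to the fence address, then trap,
	 * which should raise the fence interrupt */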
	radeon_ring_write(ring, VCE_CMD_FENCE);
	radeon_ring_write(ring, addr);
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, VCE_CMD_TRAP);
	radeon_ring_write(ring, VCE_CMD_END);
}
 
/**
 * radeon_vce_ring_test - test if VCE ring is working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 */
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
	unsigned i;
	int r;

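	/* submit a single VCE_CMD_END and wait for the read pointer to
	 * advance as proof that the engine consumes commands */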
	r = radeon_ring_lock(rdev, ring, 16);
	if (r) {
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, VCE_CMD_END);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
 
/**
 * radeon_vce_ib_test - test if VCE IBs are working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 */
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

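	/* a create/destroy message pair exercises the whole IB path;
	 * waiting on the destroy fence confirms both executed */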
	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy msg (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}

error:
	radeon_fence_unref(&fence);
	return r;
}