Subversion Repositories Kolibri OS

Rev

Rev 5078 | Rev 6104 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
5078 serge 1
/*
2
 * Copyright 2013 Advanced Micro Devices, Inc.
3
 * All Rights Reserved.
4
 *
5
 * Permission is hereby granted, free of charge, to any person obtaining a
6
 * copy of this software and associated documentation files (the
7
 * "Software"), to deal in the Software without restriction, including
8
 * without limitation the rights to use, copy, modify, merge, publish,
9
 * distribute, sub license, and/or sell copies of the Software, and to
10
 * permit persons to whom the Software is furnished to do so, subject to
11
 * the following conditions:
12
 *
13
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20
 *
21
 * The above copyright notice and this permission notice (including the
22
 * next paragraph) shall be included in all copies or substantial portions
23
 * of the Software.
24
 *
25
 * Authors: Christian König 
26
 */
27
 
28
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"
36
 
37
/* 1 second timeout */
38
#define VCE_IDLE_TIMEOUT_MS	1000
39
 
40
/* Firmware Names */
41
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_vce.bin"
42
 
43
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
44
 
45
static void radeon_vce_idle_work_handler(struct work_struct *work);
46
 
47
/**
48
 * radeon_vce_init - allocate memory, load vce firmware
49
 *
50
 * @rdev: radeon_device pointer
51
 *
52
 * First step to get VCE online, allocate memory and load the firmware
53
 */
54
int radeon_vce_init(struct radeon_device *rdev)
55
{
56
	static const char *fw_version = "[ATI LIB=VCEFW,";
57
	static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
58
	unsigned long size;
59
	const char *fw_name, *c;
60
	uint8_t start, mid, end;
61
	int i, r;
62
 
63
	INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);
64
 
65
	switch (rdev->family) {
66
	case CHIP_BONAIRE:
67
	case CHIP_KAVERI:
68
	case CHIP_KABINI:
69
	case CHIP_HAWAII:
70
	case CHIP_MULLINS:
71
		fw_name = FIRMWARE_BONAIRE;
72
		break;
73
 
74
	default:
75
		return -EINVAL;
76
	}
77
 
78
	r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
79
	if (r) {
80
		dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
81
			fw_name);
82
		return r;
83
	}
84
 
85
	/* search for firmware version */
86
 
87
	size = rdev->vce_fw->size - strlen(fw_version) - 9;
88
	c = rdev->vce_fw->data;
89
	for (;size > 0; --size, ++c)
90
		if (strncmp(c, fw_version, strlen(fw_version)) == 0)
91
			break;
92
 
93
	if (size == 0)
94
		return -EINVAL;
95
 
96
	c += strlen(fw_version);
97
	if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
98
		return -EINVAL;
99
 
100
	/* search for feedback version */
101
 
102
	size = rdev->vce_fw->size - strlen(fb_version) - 3;
103
	c = rdev->vce_fw->data;
104
	for (;size > 0; --size, ++c)
105
		if (strncmp(c, fb_version, strlen(fb_version)) == 0)
106
			break;
107
 
108
	if (size == 0)
109
		return -EINVAL;
110
 
111
	c += strlen(fb_version);
112
	if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
113
		return -EINVAL;
114
 
115
	DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
116
		 start, mid, end, rdev->vce.fb_version);
117
 
118
	rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
119
 
120
	/* we can only work with this fw version for now */
121
	if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8)))
122
		return -EINVAL;
123
 
124
	/* allocate firmware, stack and heap BO */
125
 
126
	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
127
	       RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
128
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
5271 serge 129
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
130
			     &rdev->vce.vcpu_bo);
5078 serge 131
	if (r) {
132
		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
133
		return r;
134
	}
135
 
136
	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
137
	if (r) {
138
		radeon_bo_unref(&rdev->vce.vcpu_bo);
139
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
140
		return r;
141
	}
142
 
143
	r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
144
			  &rdev->vce.gpu_addr);
145
	radeon_bo_unreserve(rdev->vce.vcpu_bo);
146
	if (r) {
147
		radeon_bo_unref(&rdev->vce.vcpu_bo);
148
		dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
149
		return r;
150
	}
151
 
152
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
153
		atomic_set(&rdev->vce.handles[i], 0);
154
		rdev->vce.filp[i] = NULL;
155
        }
156
 
157
	return 0;
158
}
159
 
160
/**
161
 * radeon_vce_fini - free memory
162
 *
163
 * @rdev: radeon_device pointer
164
 *
165
 * Last step on VCE teardown, free firmware memory
166
 */
167
void radeon_vce_fini(struct radeon_device *rdev)
168
{
169
	if (rdev->vce.vcpu_bo == NULL)
170
		return;
171
 
172
	radeon_bo_unref(&rdev->vce.vcpu_bo);
173
 
174
	release_firmware(rdev->vce_fw);
175
}
176
 
177
/**
178
 * radeon_vce_suspend - unpin VCE fw memory
179
 *
180
 * @rdev: radeon_device pointer
181
 *
182
 */
183
int radeon_vce_suspend(struct radeon_device *rdev)
184
{
185
	int i;
186
 
187
	if (rdev->vce.vcpu_bo == NULL)
188
		return 0;
189
 
190
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
191
		if (atomic_read(&rdev->vce.handles[i]))
192
			break;
193
 
194
	if (i == RADEON_MAX_VCE_HANDLES)
195
		return 0;
196
 
197
	/* TODO: suspending running encoding sessions isn't supported */
198
	return -EINVAL;
199
}
200
 
201
/**
202
 * radeon_vce_resume - pin VCE fw memory
203
 *
204
 * @rdev: radeon_device pointer
205
 *
206
 */
207
int radeon_vce_resume(struct radeon_device *rdev)
208
{
209
	void *cpu_addr;
210
	int r;
211
 
212
	if (rdev->vce.vcpu_bo == NULL)
213
		return -EINVAL;
214
 
215
	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
216
	if (r) {
217
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
218
		return r;
219
	}
220
 
221
	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
222
	if (r) {
223
		radeon_bo_unreserve(rdev->vce.vcpu_bo);
224
		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
225
		return r;
226
	}
227
 
228
	memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
229
 
230
	radeon_bo_kunmap(rdev->vce.vcpu_bo);
231
 
232
	radeon_bo_unreserve(rdev->vce.vcpu_bo);
233
 
234
	return 0;
235
}
236
 
237
/**
238
 * radeon_vce_idle_work_handler - power off VCE
239
 *
240
 * @work: pointer to work structure
241
 *
242
 * power of VCE when it's not used any more
243
 */
244
static void radeon_vce_idle_work_handler(struct work_struct *work)
245
{
246
	struct radeon_device *rdev =
247
		container_of(work, struct radeon_device, vce.idle_work.work);
248
 
249
	if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
250
	    (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
251
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
252
			radeon_dpm_enable_vce(rdev, false);
253
		} else {
254
			radeon_set_vce_clocks(rdev, 0, 0);
255
		}
256
	} else {
257
		schedule_delayed_work(&rdev->vce.idle_work,
258
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
259
	}
260
}
261
 
262
/**
263
 * radeon_vce_note_usage - power up VCE
264
 *
265
 * @rdev: radeon_device pointer
266
 *
267
 * Make sure VCE is powerd up when we want to use it
268
 */
269
void radeon_vce_note_usage(struct radeon_device *rdev)
270
{
271
	bool streams_changed = false;
272
	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
273
	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
274
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
275
 
276
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
277
		/* XXX figure out if the streams changed */
278
		streams_changed = false;
279
	}
280
 
281
	if (set_clocks || streams_changed) {
282
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
283
			radeon_dpm_enable_vce(rdev, true);
284
		} else {
285
			radeon_set_vce_clocks(rdev, 53300, 40000);
286
		}
287
	}
288
}
289
 
290
/**
291
 * radeon_vce_free_handles - free still open VCE handles
292
 *
293
 * @rdev: radeon_device pointer
294
 * @filp: drm file pointer
295
 *
296
 * Close all VCE handles still open by this file pointer
297
 */
298
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
299
{
300
	int i, r;
301
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
302
		uint32_t handle = atomic_read(&rdev->vce.handles[i]);
303
		if (!handle || rdev->vce.filp[i] != filp)
304
			continue;
305
 
306
		radeon_vce_note_usage(rdev);
307
 
308
		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
309
					       handle, NULL);
310
		if (r)
311
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
312
 
313
		rdev->vce.filp[i] = NULL;
314
		atomic_set(&rdev->vce.handles[i], 0);
315
	}
316
}
317
 
318
/**
319
 * radeon_vce_get_create_msg - generate a VCE create msg
320
 *
321
 * @rdev: radeon_device pointer
322
 * @ring: ring we should submit the msg to
323
 * @handle: VCE session handle to use
324
 * @fence: optional fence to return
325
 *
326
 * Open up a stream for HW test
327
 */
328
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
329
			      uint32_t handle, struct radeon_fence **fence)
330
{
331
	const unsigned ib_size_dw = 1024;
332
	struct radeon_ib ib;
333
	uint64_t dummy;
334
	int i, r;
335
 
336
	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
337
	if (r) {
338
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
339
		return r;
340
	}
341
 
342
	dummy = ib.gpu_addr + 1024;
343
 
344
	/* stitch together an VCE create msg */
345
	ib.length_dw = 0;
346
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
347
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
348
	ib.ptr[ib.length_dw++] = handle;
349
 
350
	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
351
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
352
	ib.ptr[ib.length_dw++] = 0x00000000;
353
	ib.ptr[ib.length_dw++] = 0x00000042;
354
	ib.ptr[ib.length_dw++] = 0x0000000a;
355
	ib.ptr[ib.length_dw++] = 0x00000001;
356
	ib.ptr[ib.length_dw++] = 0x00000080;
357
	ib.ptr[ib.length_dw++] = 0x00000060;
358
	ib.ptr[ib.length_dw++] = 0x00000100;
359
	ib.ptr[ib.length_dw++] = 0x00000100;
360
	ib.ptr[ib.length_dw++] = 0x0000000c;
361
	ib.ptr[ib.length_dw++] = 0x00000000;
362
 
363
	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
364
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
365
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
366
	ib.ptr[ib.length_dw++] = dummy;
367
	ib.ptr[ib.length_dw++] = 0x00000001;
368
 
369
	for (i = ib.length_dw; i < ib_size_dw; ++i)
370
		ib.ptr[i] = 0x0;
371
 
372
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
373
	if (r) {
374
	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
375
	}
376
 
377
	if (fence)
378
		*fence = radeon_fence_ref(ib.fence);
379
 
380
	radeon_ib_free(rdev, &ib);
381
 
382
	return r;
383
}
384
 
385
/**
386
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
387
 *
388
 * @rdev: radeon_device pointer
389
 * @ring: ring we should submit the msg to
390
 * @handle: VCE session handle to use
391
 * @fence: optional fence to return
392
 *
393
 * Close up a stream for HW test or if userspace failed to do so
394
 */
395
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
396
			       uint32_t handle, struct radeon_fence **fence)
397
{
398
	const unsigned ib_size_dw = 1024;
399
	struct radeon_ib ib;
400
	uint64_t dummy;
401
	int i, r;
402
 
403
	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
404
	if (r) {
405
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
406
		return r;
407
	}
408
 
409
	dummy = ib.gpu_addr + 1024;
410
 
411
	/* stitch together an VCE destroy msg */
412
	ib.length_dw = 0;
413
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
414
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
415
	ib.ptr[ib.length_dw++] = handle;
416
 
417
	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
418
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
419
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
420
	ib.ptr[ib.length_dw++] = dummy;
421
	ib.ptr[ib.length_dw++] = 0x00000001;
422
 
423
	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
424
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
425
 
426
	for (i = ib.length_dw; i < ib_size_dw; ++i)
427
		ib.ptr[i] = 0x0;
428
 
429
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
430
	if (r) {
431
	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
432
	}
433
 
434
	if (fence)
435
		*fence = radeon_fence_ref(ib.fence);
436
 
437
	radeon_ib_free(rdev, &ib);
438
 
439
	return r;
440
}
441
 
442
/**
443
 * radeon_vce_cs_reloc - command submission relocation
444
 *
445
 * @p: parser context
446
 * @lo: address of lower dword
447
 * @hi: address of higher dword
448
 * @size: size of checker for relocation buffer
449
 *
450
 * Patch relocation inside command stream with real buffer address
451
 */
452
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
453
			unsigned size)
454
{
455
	struct radeon_cs_chunk *relocs_chunk;
5271 serge 456
	struct radeon_bo_list *reloc;
5078 serge 457
	uint64_t start, end, offset;
458
	unsigned idx;
459
 
5271 serge 460
	relocs_chunk = p->chunk_relocs;
5078 serge 461
	offset = radeon_get_ib_value(p, lo);
462
	idx = radeon_get_ib_value(p, hi);
463
 
464
	if (idx >= relocs_chunk->length_dw) {
465
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
466
			  idx, relocs_chunk->length_dw);
467
		return -EINVAL;
468
	}
469
 
5271 serge 470
	reloc = &p->relocs[(idx / 4)];
5078 serge 471
	start = reloc->gpu_offset;
472
	end = start + radeon_bo_size(reloc->robj);
473
	start += offset;
474
 
475
	p->ib.ptr[lo] = start & 0xFFFFFFFF;
476
	p->ib.ptr[hi] = start >> 32;
477
 
478
	if (end <= start) {
479
		DRM_ERROR("invalid reloc offset %llX!\n", offset);
480
		return -EINVAL;
481
	}
482
	if ((end - start) < size) {
483
		DRM_ERROR("buffer to small (%d / %d)!\n",
484
			(unsigned)(end - start), size);
485
		return -EINVAL;
486
	}
487
 
488
	return 0;
489
}
490
 
491
/**
492
 * radeon_vce_validate_handle - validate stream handle
493
 *
494
 * @p: parser context
495
 * @handle: handle to validate
496
 *
497
 * Validates the handle and return the found session index or -EINVAL
498
 * we we don't have another free session index.
499
 */
500
int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
501
{
502
	unsigned i;
503
 
504
	/* validate the handle */
505
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
506
		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
507
			return i;
508
	}
509
 
510
	/* handle not found try to alloc a new one */
511
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
512
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
513
			p->rdev->vce.filp[i] = p->filp;
514
			p->rdev->vce.img_size[i] = 0;
515
			return i;
516
		}
517
	}
518
 
519
	DRM_ERROR("No more free VCE handles!\n");
520
	return -EINVAL;
521
}
522
 
523
/**
524
 * radeon_vce_cs_parse - parse and validate the command stream
525
 *
526
 * @p: parser context
527
 *
528
 */
529
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
530
{
531
	int session_idx = -1;
532
	bool destroyed = false;
533
	uint32_t tmp, handle = 0;
534
	uint32_t *size = &tmp;
535
	int i, r;
536
 
5271 serge 537
	while (p->idx < p->chunk_ib->length_dw) {
5078 serge 538
		uint32_t len = radeon_get_ib_value(p, p->idx);
539
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
540
 
541
		if ((len < 8) || (len & 3)) {
542
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
543
                	return -EINVAL;
544
		}
545
 
546
		if (destroyed) {
547
			DRM_ERROR("No other command allowed after destroy!\n");
548
			return -EINVAL;
549
		}
550
 
551
		switch (cmd) {
552
		case 0x00000001: // session
553
			handle = radeon_get_ib_value(p, p->idx + 2);
554
			session_idx = radeon_vce_validate_handle(p, handle);
555
			if (session_idx < 0)
556
				return session_idx;
557
			size = &p->rdev->vce.img_size[session_idx];
558
			break;
559
 
560
		case 0x00000002: // task info
561
			break;
562
 
563
		case 0x01000001: // create
564
			*size = radeon_get_ib_value(p, p->idx + 8) *
565
				radeon_get_ib_value(p, p->idx + 10) *
566
				8 * 3 / 2;
567
			break;
568
 
569
		case 0x04000001: // config extension
570
		case 0x04000002: // pic control
571
		case 0x04000005: // rate control
572
		case 0x04000007: // motion estimation
573
		case 0x04000008: // rdo
574
			break;
575
 
576
		case 0x03000001: // encode
577
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
578
						*size);
579
			if (r)
580
				return r;
581
 
582
			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
583
						*size / 3);
584
			if (r)
585
				return r;
586
			break;
587
 
588
		case 0x02000001: // destroy
589
			destroyed = true;
590
			break;
591
 
592
		case 0x05000001: // context buffer
593
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
594
						*size * 2);
595
			if (r)
596
				return r;
597
			break;
598
 
599
		case 0x05000004: // video bitstream buffer
600
			tmp = radeon_get_ib_value(p, p->idx + 4);
601
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
602
						tmp);
603
			if (r)
604
				return r;
605
			break;
606
 
607
		case 0x05000005: // feedback buffer
608
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
609
						4096);
610
			if (r)
611
				return r;
612
			break;
613
 
614
		default:
615
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
616
			return -EINVAL;
617
		}
618
 
619
		if (session_idx == -1) {
620
			DRM_ERROR("no session command at start of IB\n");
621
			return -EINVAL;
622
		}
623
 
624
		p->idx += len / 4;
625
	}
626
 
627
	if (destroyed) {
628
		/* IB contains a destroy msg, free the handle */
629
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
630
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
631
	}
632
 
633
	return 0;
634
}
635
 
636
/**
637
 * radeon_vce_semaphore_emit - emit a semaphore command
638
 *
639
 * @rdev: radeon_device pointer
640
 * @ring: engine to use
641
 * @semaphore: address of semaphore
642
 * @emit_wait: true=emit wait, false=emit signal
643
 *
644
 */
645
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
646
			       struct radeon_ring *ring,
647
			       struct radeon_semaphore *semaphore,
648
			       bool emit_wait)
649
{
650
	uint64_t addr = semaphore->gpu_addr;
651
 
652
	radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
653
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
654
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
655
	radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
656
	if (!emit_wait)
657
		radeon_ring_write(ring, VCE_CMD_END);
658
 
659
	return true;
660
}
661
 
662
/**
663
 * radeon_vce_ib_execute - execute indirect buffer
664
 *
665
 * @rdev: radeon_device pointer
666
 * @ib: the IB to execute
667
 *
668
 */
669
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
670
{
671
	struct radeon_ring *ring = &rdev->ring[ib->ring];
672
	radeon_ring_write(ring, VCE_CMD_IB);
673
	radeon_ring_write(ring, ib->gpu_addr);
674
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
675
	radeon_ring_write(ring, ib->length_dw);
676
}
677
 
678
/**
679
 * radeon_vce_fence_emit - add a fence command to the ring
680
 *
681
 * @rdev: radeon_device pointer
682
 * @fence: the fence
683
 *
684
 */
685
void radeon_vce_fence_emit(struct radeon_device *rdev,
686
			   struct radeon_fence *fence)
687
{
688
	struct radeon_ring *ring = &rdev->ring[fence->ring];
689
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
690
 
691
	radeon_ring_write(ring, VCE_CMD_FENCE);
692
	radeon_ring_write(ring, addr);
693
	radeon_ring_write(ring, upper_32_bits(addr));
694
	radeon_ring_write(ring, fence->seq);
695
	radeon_ring_write(ring, VCE_CMD_TRAP);
696
	radeon_ring_write(ring, VCE_CMD_END);
697
}
698
 
699
/**
700
 * radeon_vce_ring_test - test if VCE ring is working
701
 *
702
 * @rdev: radeon_device pointer
703
 * @ring: the engine to test on
704
 *
705
 */
706
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
707
{
708
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
709
	unsigned i;
710
	int r;
711
 
712
	r = radeon_ring_lock(rdev, ring, 16);
713
	if (r) {
714
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
715
			  ring->idx, r);
716
		return r;
717
	}
718
	radeon_ring_write(ring, VCE_CMD_END);
719
	radeon_ring_unlock_commit(rdev, ring, false);
720
 
721
	for (i = 0; i < rdev->usec_timeout; i++) {
722
	        if (vce_v1_0_get_rptr(rdev, ring) != rptr)
723
	                break;
724
	        DRM_UDELAY(1);
725
	}
726
 
727
	if (i < rdev->usec_timeout) {
728
	        DRM_INFO("ring test on %d succeeded in %d usecs\n",
729
	                 ring->idx, i);
730
	} else {
731
	        DRM_ERROR("radeon: ring %d test failed\n",
732
	                  ring->idx);
733
	        r = -ETIMEDOUT;
734
	}
735
 
736
	return r;
737
}
738
 
739
/**
740
 * radeon_vce_ib_test - test if VCE IBs are working
741
 *
742
 * @rdev: radeon_device pointer
743
 * @ring: the engine to test on
744
 *
745
 */
746
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
747
{
748
	struct radeon_fence *fence = NULL;
749
	int r;
750
 
751
	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
752
	if (r) {
753
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
754
		goto error;
755
	}
756
 
757
	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
758
	if (r) {
759
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
760
		goto error;
761
	}
762
 
763
	r = radeon_fence_wait(fence, false);
764
	if (r) {
765
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
766
	} else {
767
	        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
768
	}
769
error:
770
	radeon_fence_unref(&fence);
771
	return r;
772
}