Subversion Repositories: KolibriOS


radeon_uvd.c: Rev 5078 → Rev 5271
@@ -44,9 +44,12 @@
 #define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
 #define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
 #define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
 #define FIRMWARE_BONAIRE	"radeon/BONAIRE_uvd.bin"
 
+MODULE_FIRMWARE(FIRMWARE_R600);
+MODULE_FIRMWARE(FIRMWARE_RS780);
+MODULE_FIRMWARE(FIRMWARE_RV770);
 MODULE_FIRMWARE(FIRMWARE_RV710);
 MODULE_FIRMWARE(FIRMWARE_CYPRESS);
 MODULE_FIRMWARE(FIRMWARE_SUMO);
 MODULE_FIRMWARE(FIRMWARE_TAHITI);
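The three added MODULE_FIRMWARE() lines extend the firmware advertisement to the older R600-family ASICs (R600, RS780, RV770). This tracks the upstream Linux radeon driver, which gained UVD support for those chips; the matching FIRMWARE_R600/FIRMWARE_RS780/FIRMWARE_RV770 defines presumably sit just above the context shown here.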
@@ -113,11 +116,13 @@
 			fw_name);
 		return r;
 	}
 
 	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
-		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
+		  RADEON_GPU_PAGE_SIZE;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
+			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+			     NULL, &rdev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
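Two related changes: radeon_bo_create() now takes one more argument (a NULL here, matching the extra reservation-object parameter the function gained upstream), and the vcpu buffer object grows by one GPU page. That extra page is the scratch space the rewritten radeon_uvd_get_create_msg()/radeon_uvd_get_destroy_msg() below use for UVD messages. A sketch of the resulting layout, using only the constants visible above:

	+-------------------------------+ 0
	| firmware image                |
	+-------------------------------+ RADEON_GPU_PAGE_ALIGN(uvd_fw->size + 8)
	| stack (RADEON_UVD_STACK_SIZE) |
	+-------------------------------+
	| heap (RADEON_UVD_HEAP_SIZE)   |
	+-------------------------------+
	| message page                  | <- one RADEON_GPU_PAGE_SIZE
	+-------------------------------+ bo_size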
@@ -229,11 +234,31 @@
 		memset(ptr, 0, size);
 
 	return 0;
 }
 
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains)
 {
-	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
-	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	int i;
+
+	for (i = 0; i < rbo->placement.num_placement; ++i) {
+		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	}
+
+	/* If it must be in VRAM it must be in the first segment as well */
+	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
+		return;
+
+	/* abort if we already have more than one placement */
+	if (rbo->placement.num_placement > 1)
+		return;
+
+	/* add another 256MB segment */
+	rbo->placements[1] = rbo->placements[0];
+	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placement.num_placement++;
+	rbo->placement.num_busy_placement++;
 }
 
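radeon_uvd_force_into_uvd_segment() is adapted to the reworked placement scheme in which fpfn/lpfn live in each entry of the placements array rather than on the placement itself, and it gains an allowed_domains parameter. Every placement is first clamped to the first 256MB segment that UVD requires; if the buffer may also live outside VRAM and only one placement exists, a second 256MB segment is appended as a busy fallback. (`0 >> PAGE_SHIFT` is just 0, written that way for symmetry with the lpfn expression.)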
@@ -354,18 +379,20 @@
 static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 			     unsigned offset, unsigned buf_sizes[])
 {
 	int32_t *msg, msg_type, handle;
 	unsigned img_size = 0;
+	struct fence *f;
 	void *ptr;
 
 	int i, r;
 
 	if (offset & 0x3F) {
 		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
-	if (bo->tbo.sync_obj) {
-		r = radeon_fence_wait(bo->tbo.sync_obj, false);
+	f = reservation_object_get_excl(bo->tbo.resv);
+	if (f) {
+		r = radeon_fence_wait((struct radeon_fence *)f, false);
 		if (r) {
 			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
@@ -439,22 +466,22 @@
 static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 			       int data0, int data1,
 			       unsigned buf_sizes[], bool *has_msg_cmd)
 {
 	struct radeon_cs_chunk *relocs_chunk;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	unsigned idx, cmd, offset;
 	uint64_t start, end;
 	int r;
 
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	offset = radeon_get_ib_value(p, data0);
 	idx = radeon_get_ib_value(p, data1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, relocs_chunk->length_dw);
 		return -EINVAL;
 	}
 
-	reloc = p->relocs_ptr[(idx / 4)];
+	reloc = &p->relocs[(idx / 4)];
 	start = reloc->gpu_offset;
 	end = start + radeon_bo_size(reloc->robj);
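Command-stream bookkeeping was renamed: relocation entries are now struct radeon_bo_list, the relocs_ptr indirection is gone (entries are indexed directly in p->relocs), and the relocation chunk is reached through the p->chunk_relocs pointer instead of an index into p->chunks[].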
@@ -561,13 +588,13 @@
 		[0x00000001]	=	32 * 1024 * 1024,
 		[0x00000002]	=	2048 * 1152 * 3,
 		[0x00000003]	=	2048,
 	};
 
-	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+	if (p->chunk_ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
-			  p->chunks[p->chunk_ib_idx].length_dw);
+			  p->chunk_ib->length_dw);
 		return -EINVAL;
 	}
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
@@ -591,48 +618,26 @@
 			break;
 		default:
 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
 			return -EINVAL;
 		}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 
 	if (!has_msg_cmd) {
 		DRM_ERROR("UVD-IBs need a msg command!\n");
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
 static int radeon_uvd_send_msg(struct radeon_device *rdev,
-			       int ring, struct radeon_bo *bo,
+			       int ring, uint64_t addr,
 			       struct radeon_fence **fence)
 {
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
 	struct radeon_ib ib;
-	uint64_t addr;
 	int i, r;
 
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head);
-	if (r)
-		return r;
-
-	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
-	radeon_uvd_force_into_uvd_segment(bo);
-
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	if (r)
-		goto err;
-
 	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
 	if (r)
-		goto err;
+		return r;
 
-	addr = radeon_bo_gpu_offset(bo);
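radeon_uvd_send_msg() loses most of its body. Instead of taking a buffer object and doing the reserve/validate/fence dance itself (ttm_eu_reserve_buffers(), radeon_ttm_placement_from_domain(), radeon_uvd_force_into_uvd_segment(), ttm_bo_validate(), ttm_eu_fence_buffer_objects()), it now receives a ready-to-use GPU address and merely builds and schedules the 16-dword IB; with the err: unwind label gone, the early exits return directly.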
@@ -645,45 +650,28 @@
 	for (i = 6; i < 16; ++i)
 		ib.ptr[i] = PACKET2(0);
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
-	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
 
 	radeon_ib_free(rdev, &ib);
-	radeon_bo_unref(&bo);
-	return 0;
-
-err:
-	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
 }
 
 /* multiple fence commands without any stream commands in between can
    crash the vcpu so just try to emmit a dummy create/destroy msg to
    avoid this */
 int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 			      uint32_t handle, struct radeon_fence **fence)
 {
-	struct radeon_bo *bo;
-	uint32_t *msg;
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
+
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
+
 	int r, i;
 
-	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
-	if (r)
-		return r;
-
-	r = radeon_bo_reserve(bo, false);
-	if (r) {
-		radeon_bo_unref(&bo);
-		return r;
-	}
-
-	r = radeon_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		radeon_bo_unreserve(bo);
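radeon_uvd_get_create_msg() no longer allocates, reserves and kmaps a throwaway 1024-dword buffer object per message: it writes the message into the last GPU page of the persistent vcpu BO and hands the corresponding GPU address to radeon_uvd_send_msg(). The pointer arithmetic, collected from the lines above (a sketch; it assumes RADEON_GPU_PAGE_SIZE is the usual 4096 bytes):

	/* the last GPU page of the vcpu BO doubles as the message buffer;
	 * 1024 dwords * 4 bytes == 4096 bytes, so the "i < 1024" loops
	 * initialize exactly one page */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - RADEON_GPU_PAGE_SIZE;
	uint32_t *msg = rdev->uvd.cpu_addr + offs;	/* CPU view, written below */
	uint64_t addr = rdev->uvd.gpu_addr + offs;	/* GPU address for send_msg() */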
@@ -704,45 +692,34 @@
 	msg[9] = cpu_to_le32(0x00000000);
 	msg[10] = cpu_to_le32(0x01b37000);
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	radeon_bo_kunmap(bo);
-	radeon_bo_unreserve(bo);
-
-	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
 }
 
 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence)
 {
-	struct radeon_bo *bo;
-	uint32_t *msg;
-	int r, i;
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
 
-	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
-	if (r)
-		return r;
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
-	r = radeon_bo_reserve(bo, false);
-	if (r) {
-		radeon_bo_unref(&bo);
-		return r;
-	}
+	int r, i;
 
-	r = radeon_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		radeon_bo_unreserve(bo);
-		radeon_bo_unref(&bo);
-		return r;
-	}
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
+		return r;
 
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000002);
 	msg[2] = cpu_to_le32(handle);
 	msg[3] = cpu_to_le32(0x00000000);
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	radeon_bo_kunmap(bo);
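radeon_uvd_get_destroy_msg() gets the same treatment as the create path: the destroy message is stitched together in place in the vcpu BO's last page after reserving rdev->uvd.vcpu_bo, so the temporary BO and its create/reserve/kmap/kunmap sequence disappear.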