Subversion Repositories: Kolibri OS

Diff of Rev 5179 against Rev 5271 (unified view: "-" lines are Rev 5179, "+" lines are Rev 5271).
@@ -336,26 +336,26 @@
  */
 int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	struct radeon_ib ib;
 	unsigned i;
+	unsigned index;
 	int r;
-	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
 	u32 tmp = 0;
+	u64 gpu_addr;
 
-	if (!ptr) {
-		DRM_ERROR("invalid vram scratch pointer\n");
-		return -EINVAL;
-	}
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
 
-	tmp = 0xCAFEDEAD;
-	writel(tmp, ptr);
+	gpu_addr = rdev->wb.gpu_addr + index;
 
 	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
 	if (r) {
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;
 	}
 
 	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
-	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+	ib.ptr[1] = lower_32_bits(gpu_addr);
+	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
@@ -372,11 +372,11 @@
 	if (r) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		return r;
 	}
 	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = readl(ptr);
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
 		if (tmp == 0xDEADBEEF)
 			break;
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
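
Note on the two hunks above: Rev 5271 retargets the DMA IB test from the dedicated VRAM scratch page to the GPU writeback (WB) buffer, which the driver keeps in GART-accessible system memory. Each DMA ring gets its own test slot (R600_WB_DMA_RING_TEST_OFFSET or CAYMAN_WB_DMA1_RING_TEST_OFFSET), and the CPU side now polls plain memory through rdev->wb.wb[] instead of issuing uncached readl() accesses. A minimal sketch of the resulting poll pattern, assuming the radeon types used in this file (the helper name poll_wb_dword() is hypothetical, for illustration only):

	static int poll_wb_dword(struct radeon_device *rdev, unsigned index)
	{
		unsigned i;
		u32 tmp;

		for (i = 0; i < rdev->usec_timeout; i++) {
			/* the GPU DMA engine writes into the WB buffer; the
			 * CPU reads ordinary system memory, no MMIO involved */
			tmp = le32_to_cpu(rdev->wb.wb[index/4]);
			if (tmp == 0xDEADBEEF)
				return 0;	/* magic value landed */
			DRM_UDELAY(1);
		}
		return -ETIMEDOUT;
	}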
@@ -428,41 +428,38 @@
  *
  * @rdev: radeon_device pointer
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
  * Copy GPU paging using the DMA engine (r6xx).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int r600_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset, uint64_t dst_offset,
 		  unsigned num_gpu_pages,
-		  struct radeon_fence **fence)
+				   struct reservation_object *resv)
 {
-	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
 	int ring_index = rdev->asic->copy.dma_ring_index;
 	struct radeon_ring *ring = &rdev->ring[ring_index];
 	u32 size_in_dw, cur_size_in_dw;
 	int i, num_loops;
 	int r = 0;
 
-	r = radeon_semaphore_create(rdev, &sem);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
-	}
+	radeon_sync_create(&sync);
 
 	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
 	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
 	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_to(sem, *fence);
-	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
 
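Note on the hunk above: Rev 5271 replaces the heap-allocated radeon_semaphore (whose creation could fail) with an on-stack radeon_sync object, and replaces the explicit fence out-parameter with a reservation_object to synchronize against. A sketch of the new setup sequence, assembled from the calls visible in this diff (error handling elided; I do not assert the exact semantics of the final bool flag to radeon_sync_resv() here):

	struct radeon_sync sync;

	radeon_sync_create(&sync);			/* init only, no allocation to fail */
	radeon_sync_resv(rdev, &sync, resv, false);	/* pick up fences attached to resv  */
	radeon_sync_rings(rdev, &sync, ring->idx);	/* emit waits for other rings       */
	/* ... queue the copy packets ... */
	radeon_sync_free(rdev, &sync, fence);		/* release, tied to the new fence   */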
@@ -478,14 +475,14 @@
 					 (upper_32_bits(src_offset) & 0xff)));
 		src_offset += cur_size_in_dw * 4;
 		dst_offset += cur_size_in_dw * 4;
 	}
 
-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
-		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
 	}
 
 	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_sync_free(rdev, &sync, fence);
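
Finally, note the changed error convention above: r600_copy_dma() no longer returns an int and fills a struct radeon_fence ** out-parameter; it emits its own fence and returns it directly, encoding failures with ERR_PTR(). A hypothetical caller under the new convention (the surrounding code is illustrative, not taken from this diff):

	struct radeon_fence *fence;

	fence = r600_copy_dma(rdev, src_offset, dst_offset, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* decode the errno hidden in the pointer */
	/* ... track or wait on the fence, then drop the reference ... */
	radeon_fence_unref(&fence);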