Subversion Repositories Kolibri OS

Rev

Rev 5078 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 5078 Rev 5271
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rv770d.h"
28
 
28
 
29
/**
29
/**
30
 * rv770_copy_dma - copy pages using the DMA engine
30
 * rv770_copy_dma - copy pages using the DMA engine
31
 *
31
 *
32
 * @rdev: radeon_device pointer
32
 * @rdev: radeon_device pointer
33
 * @src_offset: src GPU address
33
 * @src_offset: src GPU address
34
 * @dst_offset: dst GPU address
34
 * @dst_offset: dst GPU address
35
 * @num_gpu_pages: number of GPU pages to xfer
35
 * @num_gpu_pages: number of GPU pages to xfer
36
 * @fence: radeon fence object
36
 * @resv: reservation object to sync to
37
 *
37
 *
38
 * Copy GPU paging using the DMA engine (r7xx).
38
 * Copy GPU paging using the DMA engine (r7xx).
39
 * Used by the radeon ttm implementation to move pages if
39
 * Used by the radeon ttm implementation to move pages if
40
 * registered as the asic copy callback.
40
 * registered as the asic copy callback.
41
 */
41
 */
42
int rv770_copy_dma(struct radeon_device *rdev,
42
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
43
		  uint64_t src_offset, uint64_t dst_offset,
43
		  uint64_t src_offset, uint64_t dst_offset,
44
		  unsigned num_gpu_pages,
44
		  unsigned num_gpu_pages,
45
		  struct radeon_fence **fence)
45
				    struct reservation_object *resv)
46
{
46
{
47
	struct radeon_semaphore *sem = NULL;
47
	struct radeon_fence *fence;
-
 
48
	struct radeon_sync sync;
48
	int ring_index = rdev->asic->copy.dma_ring_index;
49
	int ring_index = rdev->asic->copy.dma_ring_index;
49
	struct radeon_ring *ring = &rdev->ring[ring_index];
50
	struct radeon_ring *ring = &rdev->ring[ring_index];
50
	u32 size_in_dw, cur_size_in_dw;
51
	u32 size_in_dw, cur_size_in_dw;
51
	int i, num_loops;
52
	int i, num_loops;
52
	int r = 0;
53
	int r = 0;
53
 
54
 
54
	r = radeon_semaphore_create(rdev, &sem);
-
 
55
	if (r) {
-
 
56
		DRM_ERROR("radeon: moving bo (%d).\n", r);
-
 
57
		return r;
-
 
58
	}
55
	radeon_sync_create(&sync);
59
 
56
 
60
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
57
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
61
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
58
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
62
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
59
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
63
	if (r) {
60
	if (r) {
64
		DRM_ERROR("radeon: moving bo (%d).\n", r);
61
		DRM_ERROR("radeon: moving bo (%d).\n", r);
65
		radeon_semaphore_free(rdev, &sem, NULL);
62
		radeon_sync_free(rdev, &sync, NULL);
66
		return r;
63
		return ERR_PTR(r);
67
	}
64
	}
68
 
65
 
69
	radeon_semaphore_sync_to(sem, *fence);
66
	radeon_sync_resv(rdev, &sync, resv, false);
70
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
67
	radeon_sync_rings(rdev, &sync, ring->idx);
71
 
68
 
72
	for (i = 0; i < num_loops; i++) {
69
	for (i = 0; i < num_loops; i++) {
73
		cur_size_in_dw = size_in_dw;
70
		cur_size_in_dw = size_in_dw;
74
		if (cur_size_in_dw > 0xFFFF)
71
		if (cur_size_in_dw > 0xFFFF)
75
			cur_size_in_dw = 0xFFFF;
72
			cur_size_in_dw = 0xFFFF;
76
		size_in_dw -= cur_size_in_dw;
73
		size_in_dw -= cur_size_in_dw;
77
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
74
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
78
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
75
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
79
		radeon_ring_write(ring, src_offset & 0xfffffffc);
76
		radeon_ring_write(ring, src_offset & 0xfffffffc);
80
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
77
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
81
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
78
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
82
		src_offset += cur_size_in_dw * 4;
79
		src_offset += cur_size_in_dw * 4;
83
		dst_offset += cur_size_in_dw * 4;
80
		dst_offset += cur_size_in_dw * 4;
84
	}
81
	}
85
 
82
 
86
	r = radeon_fence_emit(rdev, fence, ring->idx);
83
	r = radeon_fence_emit(rdev, &fence, ring->idx);
87
	if (r) {
84
	if (r) {
88
		radeon_ring_unlock_undo(rdev, ring);
85
		radeon_ring_unlock_undo(rdev, ring);
89
		radeon_semaphore_free(rdev, &sem, NULL);
86
		radeon_sync_free(rdev, &sync, NULL);
90
		return r;
87
		return ERR_PTR(r);
91
	}
88
	}
92
 
89
 
93
	radeon_ring_unlock_commit(rdev, ring, false);
90
	radeon_ring_unlock_commit(rdev, ring, false);
94
	radeon_semaphore_free(rdev, &sem, *fence);
91
	radeon_sync_free(rdev, &sync, fence);
95
 
92
 
96
	return r;
93
	return fence;
97
}
94
}